Compare commits

...

178 Commits
0.0.1 ... 0.4.1

Author SHA1 Message Date
Stefan Prodan
b8a7ea8534 Merge pull request #35 from stefanprodan/gh-actions
Publish charts with GitHub Actions
2019-01-24 11:52:54 +01:00
stefanprodan
afe4d59d5a Move Helm repository to gh-pages branch 2019-01-24 12:47:36 +02:00
stefanprodan
0f2697df23 Publish charts with GitHub Actions 2019-01-24 12:38:45 +02:00
stefanprodan
05664fa648 Release v0.4.1 2019-01-24 12:17:37 +02:00
Stefan Prodan
3b2564f34b Merge pull request #33 from stefanprodan/loadtest
Add load testing service
2019-01-24 11:04:31 +01:00
stefanprodan
dd0cf2d588 Add load tester dockerfile to docs 2019-01-23 15:12:23 +02:00
stefanprodan
7c66f23c6a Add load tester Helm chart 2019-01-21 21:02:40 +02:00
stefanprodan
a9f034de1a Add load testing diagram 2019-01-21 18:02:44 +02:00
stefanprodan
6ad2dca57a Add load testing setup to docs 2019-01-21 17:29:04 +02:00
stefanprodan
e8353c110b Release load tester v0.0.2 2019-01-21 13:37:26 +02:00
stefanprodan
dbf26ddf53 Add load tester flag to log the cmd output 2019-01-21 13:36:08 +02:00
stefanprodan
acc72d207f Change container image tag format 2019-01-20 17:27:08 +02:00
stefanprodan
a784f83464 Add loadtester manifests 2019-01-20 15:59:41 +02:00
stefanprodan
07d8355363 Rename load testing service to flagger-loadtester 2019-01-20 14:28:45 +02:00
stefanprodan
f7a439274e Go format API types 2019-01-20 14:10:10 +02:00
stefanprodan
bd6d446cb8 Go format scheduler 2019-01-20 14:04:10 +02:00
stefanprodan
385d0e0549 Add load test runner service
- embed rakyll/hey in the runner container image
2019-01-20 14:00:14 +02:00
stefanprodan
02236374d8 Run the webhooks before the metrics checks
- log warning when no values are found for Istio metric due to lack of traffic
2019-01-20 13:54:44 +02:00
stefanprodan
c46fe55ad0 Release v0.4.0 2019-01-18 12:49:36 +02:00
Stefan Prodan
36a54fbf2a Merge pull request #31 from stefanprodan/reset
Restart analysis if revision changes during validation
2019-01-18 10:25:38 +01:00
stefanprodan
60f6b05397 Refactor scheduler tests 2019-01-18 11:14:27 +02:00
stefanprodan
6d8a7343b7 Add tests for analysis restart and canary promotion 2019-01-18 11:05:40 +02:00
stefanprodan
aff8b117d4 Restart validation if revision changes during analysis 2019-01-17 15:13:59 +02:00
Stefan Prodan
1b3c3b22b3 Merge pull request #29 from stefanprodan/status
Use Kubernetes 1.11 CRD status sub-resource
2019-01-17 13:06:28 +01:00
stefanprodan
1d31b5ed90 Add canary name and namespace to controller logs
- zap key-value: canary=name.namespace
2019-01-17 13:58:10 +02:00
stefanprodan
1ef310f00d Add traffic weight to canary status
- show current weight on kubectl get canaries and kubectl get all
2019-01-16 16:29:59 +02:00
stefanprodan
acdd2c46d5 Refactor Canary status
- add status phases (Initialized, Progressing, Succeeded, Failed)
- rename status revision to LastAppliedSpec
2019-01-16 15:06:38 +02:00
stefanprodan
9872e6bc16 Skip readiness checks if canary analysis finished 2019-01-16 13:18:53 +02:00
stefanprodan
10c2bdec86 Use deep copy when updating the virtual service routes 2019-01-16 13:13:07 +02:00
stefanprodan
4bf3b70048 Use CRD UpdateStatus for Canary status updates
- requires Kubernetes >=1.11
2019-01-16 01:00:39 +02:00
stefanprodan
ada446bbaa Drop compatibility with Kubernetes 1.10 2019-01-16 00:58:51 +02:00
stefanprodan
c4981ef4db Add status and additional printer columns to CRD 2019-01-16 00:57:46 +02:00
Stefan Prodan
d1b84cd31d Merge pull request #28 from stefanprodan/naming
Fix for when canary name is different to the target name
2019-01-15 23:32:41 +01:00
stefanprodan
9232c8647a Check if multiple canaries have the same target
- log an error on target duplication ref #13
2019-01-15 21:43:05 +02:00
stefanprodan
23e8c7d616 Fix for when canary name is different to the target name
- use the target name consistently at bootstrap
2019-01-15 21:18:46 +02:00
Stefan Prodan
42607fbd64 Merge pull request #26 from carlossg/service-name
Fix VirtualService routes
2019-01-15 19:38:38 +01:00
stefanprodan
28781a5f02 Use deep copy when updating the deployment object
- fix canary status update logs
2019-01-15 20:37:14 +02:00
stefanprodan
3589e11244 Bump dev version 2019-01-15 20:36:59 +02:00
Carlos Sanchez
5e880d3942 Wrong VirtualService routes
If the deployment name is different from the canary name,
the virtual service routes are created with the canary name
but the services are created with the deployment name

Note that canary name should match deployment name
2019-01-15 18:44:50 +01:00
stefanprodan
f7e675144d Release v0.3.0 2019-01-11 20:10:41 +02:00
Stefan Prodan
3bff2c339b Merge pull request #20 from stefanprodan/scheduler
Add canary analysis schedule interval to CRD
2019-01-11 19:06:17 +01:00
Stefan Prodan
b035c1e7fb Merge pull request #25 from carlossg/virtualservice-naming
Tries to create VirtualService that already exists
2019-01-11 18:03:57 +01:00
Carlos Sanchez
7ae0d49e80 Tries to create VirtualService that already exists
When canary name is different than deployment name

VirtualService croc-hunter-jenkinsx.jx-staging create error virtualservices.networking.istio.io "croc-hunter-jenkinsx" already exists
2019-01-11 17:47:52 +01:00
Stefan Prodan
07f66e849d Merge branch 'master' into scheduler 2019-01-11 15:07:03 +01:00
Stefan Prodan
06c29051eb Merge pull request #24 from carlossg/log-fix
Fix bad error message
2019-01-11 15:05:37 +01:00
stefanprodan
83118faeb3 Fix autoscalerRef tests 2019-01-11 13:51:44 +02:00
stefanprodan
aa2c28c733 Make autoscalerRef optional
- use anyOf as a workaround for the openAPI object validation not accepting empty values
- fix #23
2019-01-11 13:42:32 +02:00
stefanprodan
10185407f6 Use httpbin.org for webhook testing 2019-01-11 13:12:53 +02:00
Carlos Sanchez
c1bde57c17 Fix bad error message
"controller/scheduler.go:217","msg":"deployment . update error Canary.flagger.app \"jx-staging-croc-hunter-jenkinsx\" is invalid: []: Invalid value: map[string]interface {}{\"metadata\":map[string]interface {}{\"name\":\"jx-staging-croc-hunter-jenkinsx\", \"namespace\":\"jx-staging\", \"selfLink\":\"/apis/flagger.app/v1alpha2/namespaces/jx-staging/canaries/jx-staging-croc-hunter-jenkinsx\", \"uid\":\"b248877e-1406-11e9-bf64-42010a8000c6\", \"resourceVersion\":\"30650895\", \"generation\":1, \"creationTimestamp\":\"2019-01-09T12:04:20Z\"}, \"spec\":map[string]interface {}{\"canaryAnalysis\":map[string]interface {}{\"threshold\":5, \"maxWeight\":50, \"stepWeight\":10, \"metrics\":[]interface {}{map[string]interface {}{\"name\":\"istio_requests_total\", \"interval\":\"1m\", \"threshold\":99}, map[string]interface {}{\"name\":\"istio_request_duration_seconds_bucket\", \"interval\":\"30s\"istio-system/flagger-b486d78c8-fkmbr[flagger]: {"level":"info","ts":"2019-01-09T12:14:05.158Z","caller":"controller/deployer.go:228","msg":"Scaling down jx-staging-croc-hunter-jenkinsx.jx-staging"}
2019-01-09 13:17:17 +01:00
stefanprodan
882b4b2d23 Update the control loop interval flag description 2019-01-08 13:15:10 +02:00
Stefan Prodan
cac585157f Merge pull request #21 from carlossg/patch-1
Qualify letsencrypt api version
2019-01-07 15:45:07 +02:00
Carlos Sanchez
cc2860a49f Qualify letsencrypt api version
Otherwise getting

    error: unable to recognize "./letsencrypt-issuer.yaml": no matches for kind "Issuer" in version "v1alpha2"
2019-01-07 14:38:53 +01:00
stefanprodan
bec96356ec Bump CRD version to v1alpha3
- new field canaryAnalysis.interval
2019-01-07 01:03:31 +02:00
stefanprodan
b5c648ea54 Bump version to 0.3.0-beta.1 2019-01-07 00:30:09 +02:00
stefanprodan
e6e3e500be Schedule canary analysis based on interval 2019-01-07 00:26:01 +02:00
stefanprodan
537e8fdaf7 Add canary analysis interval to CRD 2019-01-07 00:24:43 +02:00
stefanprodan
322c83bdad Add docs site link to chart 2019-01-06 18:18:47 +02:00
stefanprodan
41f0ba0247 Document the CRD target ref and control loop interval 2019-01-05 10:22:00 +02:00
stefanprodan
b67b49fde6 Change the default analysis interval to 1m 2019-01-05 01:05:27 +02:00
stefanprodan
f90ba560b7 Release v0.2.0 2019-01-04 13:44:50 +02:00
Stefan Prodan
2a9641fd68 Merge pull request #18 from stefanprodan/webhooks
Add external checks to canary analysis
2019-01-04 13:24:49 +02:00
stefanprodan
13fffe1323 Document webhooks status codes 2019-01-03 18:46:36 +02:00
stefanprodan
083556baae Document the canary analysis timespan 2019-01-03 18:27:49 +02:00
stefanprodan
5d0939af7d Add webhook docs 2019-01-03 16:11:30 +02:00
stefanprodan
d26255070e Copyright Weaveworks 2019-01-03 14:42:21 +02:00
stefanprodan
b008abd4a7 Fix metrics server offline test 2018-12-27 12:43:43 +02:00
stefanprodan
cbf9e1011d Add tests for metrics server check 2018-12-27 12:42:12 +02:00
stefanprodan
6ec3d7a76f Format observer tests 2018-12-27 12:21:33 +02:00
stefanprodan
ab52752d57 Add observer histogram test 2018-12-27 12:16:10 +02:00
stefanprodan
df3951a7ef Add observer tests 2018-12-27 12:15:16 +02:00
stefanprodan
722d36a8cc Add webhook tests 2018-12-26 17:58:35 +02:00
stefanprodan
e86c02d600 Implement canary external check
- do a HTTP POST for each webhook registered in the canary analysis
- increment the failed checks counter if a webhook returns a non-2xx status code and log the error and the response body if it exists
2018-12-26 14:41:35 +02:00
stefanprodan
53546878d5 Make service port mandatory in CRD v1alpha2 2018-12-26 13:55:34 +02:00
stefanprodan
199e3b36c6 Upgrade CRD to v1alpha2
- add required fields for deployment and hpa targets
- make service port mandatory
- add webhooks validation
2018-12-26 13:46:59 +02:00
stefanprodan
0d96bedfee Add webhooks to Canary CRD v1alpha2 2018-12-26 13:42:36 +02:00
Stefan Prodan
9753820579 GitBook: [master] 3 pages modified 2018-12-19 14:32:51 +00:00
Stefan Prodan
197f218ba4 GitBook: [master] one page modified 2018-12-19 14:10:49 +00:00
Stefan Prodan
b4b1a36aba GitBook: [master] 8 pages modified 2018-12-19 13:45:12 +00:00
stefanprodan
cfc848bfa9 Link to docs website 2018-12-19 15:42:16 +02:00
stefanprodan
fcf6f96912 Add overview diagram 2018-12-19 15:30:43 +02:00
Stefan Prodan
1504dcab74 GitBook: [master] 5 pages modified 2018-12-19 13:24:16 +00:00
Stefan Prodan
4e4bc0c4f0 GitBook: [master] 4 pages modified 2018-12-19 13:21:33 +00:00
Stefan Prodan
36ce610465 GitBook: [master] 5 pages modified 2018-12-19 12:46:06 +00:00
stefanprodan
1dc2aa147b Ignore gitbook for GitHub pages 2018-12-19 13:31:18 +02:00
Stefan Prodan
8cc7e4adbb GitBook: [master] 4 pages modified 2018-12-19 11:25:30 +00:00
Stefan Prodan
978f7256a8 GitBook: [master] 2 pages modified 2018-12-19 10:08:59 +00:00
stefanprodan
e799e63e3f Set gitbook root 2018-12-19 12:00:18 +02:00
stefanprodan
5b35854464 init gitbook 2018-12-19 11:56:42 +02:00
stefanprodan
d485498a14 Add email field to charts 2018-12-18 18:38:33 +02:00
stefanprodan
dfa974cf57 Change Grafana chart title 2018-12-18 18:37:04 +02:00
stefanprodan
ee1e2e6fd9 Upgrade Grafana to v5.4.2 2018-12-18 12:58:14 +02:00
Stefan Prodan
eeb3b1ba4d Merge pull request #15 from stefanprodan/chart
Add service account option to Helm chart
2018-12-18 12:12:05 +02:00
Stefan Prodan
b510f0ee02 Merge branch 'master' into chart 2018-12-18 11:56:06 +02:00
stefanprodan
c34737b9ce Use app.kubernetes.io labels 2018-12-18 11:53:42 +02:00
stefanprodan
e4ea4f3994 Make the service account optional 2018-12-18 11:06:53 +02:00
stefanprodan
07359192e7 Add chart prerequisites and icon 2018-12-18 10:31:47 +02:00
stefanprodan
4dd23c42a2 Add Flagger logo and icons 2018-12-18 10:31:05 +02:00
Stefan Prodan
f281021abf Add Slack notifications screen 2018-12-06 16:18:38 +07:00
Stefan Prodan
71137ba3bb Release 0.1.2 2018-12-06 14:00:12 +07:00
Stefan Prodan
6372c7dfcc Merge pull request #14 from stefanprodan/slack
Add details to Slack messages
2018-12-06 13:53:20 +07:00
Stefan Prodan
4584733f6f Change coverage threshold 2018-12-06 13:48:06 +07:00
Stefan Prodan
03408683c0 Add details to Slack messages
- attach canary analysis metadata to init/start messages
- add rollback reason to failed canary messages
2018-12-06 12:51:02 +07:00
Stefan Prodan
29137ae75b Add Alertmanager example 2018-12-06 12:49:41 +07:00
Stefan Prodan
6bf85526d0 Add Slack screens with successful and failed canaries 2018-12-06 12:49:15 +07:00
stefanprodan
9f6a30f43e Bump dev version 2018-11-28 15:08:24 +02:00
stefanprodan
11bc0390c4 Release v0.1.1 2018-11-28 14:56:34 +02:00
stefanprodan
9a29ea69d7 Change progress deadline default to 10 minutes 2018-11-28 14:53:12 +02:00
Stefan Prodan
2d8adbaca4 Merge pull request #10 from stefanprodan/deadline
Rollback canary based on the deployment progress deadline check
2018-11-28 14:48:17 +02:00
stefanprodan
f3904ea099 Use canary state constants in recorder 2018-11-27 17:34:48 +02:00
stefanprodan
1b2b13e77f Disable patch coverage 2018-11-27 17:11:57 +02:00
stefanprodan
8878f15806 Clean up isDeploymentReady 2018-11-27 17:11:35 +02:00
stefanprodan
5977ff9bae Add rollback test based on failed checks threshold 2018-11-27 17:00:13 +02:00
stefanprodan
11ef6bdf37 Add progressDeadlineSeconds to canary example 2018-11-27 16:58:21 +02:00
stefanprodan
9c342e35be Add progressDeadlineSeconds validation 2018-11-27 16:35:39 +02:00
stefanprodan
c7e7785b06 Fix canary deployer is ready test 2018-11-27 15:55:04 +02:00
stefanprodan
4cb5ceb48b Rollback canary based on the deployment progress deadline check
- determine if the canary deployment is stuck by checking if there is a minimum replicas unavailable condition and if the last update time exceeds the deadline
- set progress deadline default value to 60 seconds
2018-11-27 15:44:15 +02:00
stefanprodan
5a79402a73 Add canary status state constants 2018-11-27 15:29:06 +02:00
stefanprodan
c24b11ff8b Add ProgressDeadlineSeconds to Canary CRD 2018-11-27 12:16:20 +02:00
stefanprodan
042d3c1a5b Set ProgressDeadlineSeconds for primary deployment on init/promote 2018-11-27 12:10:14 +02:00
stefanprodan
f8821cf30b bump dev version 2018-11-27 11:56:11 +02:00
stefanprodan
8c12cdb21d Release v0.1.0 2018-11-25 21:05:16 +02:00
stefanprodan
923799dce7 Keep CRD on Helm release delete 2018-11-25 20:13:40 +02:00
stefanprodan
ebc932fba5 Add Slack configuration to Helm readme 2018-11-25 20:07:32 +02:00
Stefan Prodan
3d8d30db47 Merge pull request #6 from stefanprodan/quay
Switch to Quay and Go 1.11
2018-11-25 19:35:53 +02:00
stefanprodan
1022c3438a Use go 1.11 for docker build 2018-11-25 19:21:42 +02:00
stefanprodan
9159855df2 Use Quay as container registry in Helm and YAML manifests 2018-11-25 19:20:29 +02:00
Stefan Prodan
7927ac0a5d Push container image to Quay 2018-11-25 18:52:18 +02:00
Stefan Prodan
f438e9a4b2 Merge pull request #4 from stefanprodan/slack
Add Slack notifications
2018-11-25 11:54:15 +02:00
Stefan Prodan
4c70a330d4 Add Slack notifications configuration to readme 2018-11-25 11:46:18 +02:00
Stefan Prodan
d8875a3da1 Add Slack flags to Helm chart 2018-11-25 11:45:38 +02:00
Stefan Prodan
769aff57cb Add Slack notifications for canary events 2018-11-25 11:44:45 +02:00
Stefan Prodan
4138f37f9a Add Slack notifier component 2018-11-25 11:40:35 +02:00
stefanprodan
583c9cc004 Rename Istio client set 2018-11-25 00:05:43 +02:00
Stefan Prodan
c5930e6f70 Update deployment strategy on promotion
- include the spec strategy, min ready seconds and revision history limit in initialization and promotion
2018-11-24 20:03:02 +02:00
stefanprodan
423d9bbbb3 Use go 1.11 in Travis 2018-11-24 16:23:20 +02:00
Stefan Prodan
07771f500f Release 0.1.0-beta.7 2018-11-24 15:58:17 +02:00
Stefan Prodan
65bd77c88f Add last transition time to Canary CRD status 2018-11-24 15:48:35 +02:00
Stefan Prodan
82bf63f89b Change website URL 2018-11-15 12:20:53 +02:00
Stefan Prodan
7f735ead07 Set site banner 2018-11-15 10:50:58 +02:00
Stefan Prodan
56ffd618d6 Increase flagger probes timeout to 5s (containerd fix) 2018-11-15 10:38:20 +02:00
Stefan Prodan
19cb34479e Increase probes timeout to 5s (containerd fix) 2018-11-14 15:39:44 +02:00
Stefan Prodan
2d906f0b71 Add Grafana install to helm-up cmd 2018-11-14 15:38:35 +02:00
Stefan Prodan
3eaeec500e Clean coverage artifacts (fix goreleaser) 2018-10-29 21:52:09 +02:00
Stefan Prodan
df98de7d11 Release v0.1.0-beta.6 2018-10-29 21:46:54 +02:00
Stefan Prodan
580924e63b Record canary duration and total
- add Prometheus metrics canary_duration_seconds and canary_total
2018-10-29 21:44:43 +02:00
Stefan Prodan
1b2108001f Add Prometheus registry flag to recorder
- fix tests
2018-10-29 14:04:45 +02:00
Stefan Prodan
3a28768bf9 Update website docs 2018-10-29 13:56:17 +02:00
Stefan Prodan
53c09f40eb Add Prometheus metrics docs
- ref #2
2018-10-29 13:44:20 +02:00
Stefan Prodan
074e57aa12 Add recorder to revision tests 2018-10-29 13:43:54 +02:00
Stefan Prodan
e16dde809d Add recorder to mock controller 2018-10-29 13:34:28 +02:00
Stefan Prodan
188e4ea82e Release v0.1.0-beta.5 2018-10-29 11:26:56 +02:00
Stefan Prodan
4a8aa3b547 Add recorder component
- records the canary analysis status and current weight as Prometheus metrics
2018-10-29 11:25:36 +02:00
Stefan Prodan
6bf4a8f95b Rename user to flagger 2018-10-23 16:58:32 +03:00
Stefan Prodan
c5ea947899 Add codecov badge 2018-10-23 16:44:25 +03:00
Stefan Prodan
344c7db968 Make golint happy and add codecov 2018-10-23 16:36:48 +03:00
Stefan Prodan
65b908e702 Release v0.1.0-beta.2 2018-10-23 13:42:43 +03:00
Stefan Prodan
8e66baa0e7 Update the artifacts yamls to match the naming conventions 2018-10-23 13:39:10 +03:00
Stefan Prodan
667e915700 Update canary dashboard to latest CRD naming conventions 2018-10-23 13:21:57 +03:00
Stefan Prodan
7af103f112 Update Grafana to v5.3.1 2018-10-23 11:21:04 +03:00
Stefan Prodan
8e2f538e4c Add scheduler tests for initialization and revision 2018-10-22 20:14:09 +03:00
Stefan Prodan
be289ef7ce Add router tests 2018-10-22 17:21:06 +03:00
Stefan Prodan
4a074e50c4 Add Istio fake clientset 2018-10-22 17:18:33 +03:00
Stefan Prodan
fa13c92a15 Add deployer status and scaling tests 2018-10-22 16:29:59 +03:00
Stefan Prodan
dbd0908313 Add deployer promote tests 2018-10-22 16:03:06 +03:00
Stefan Prodan
9b5c4586b9 Add deployer sync tests 2018-10-22 16:02:01 +03:00
Stefan Prodan
bfbb272c88 Add Kubernetes fake clientset package 2018-10-22 16:00:50 +03:00
Stefan Prodan
4b4a88cbe5 Publish Helm chart 0.1.0-alpha.2 2018-10-15 11:11:41 +02:00
Stefan Prodan
b022124415 bump version 2018-10-15 11:05:39 +02:00
Stefan Prodan
663dc82574 Controller refactoring part two
- share components between loops
2018-10-11 20:51:12 +03:00
Stefan Prodan
baeee62a26 Controller refactoring
- split controller logic into components (deployer, observer, router and scheduler)
- set the canary analysis final state (failed or finished) in a single run
2018-10-11 19:59:40 +03:00
Stefan Prodan
56f2ee9078 Add contributing and code of conduct docs 2018-10-11 14:33:28 +03:00
Stefan Prodan
a4f890c8b2 Add autoscaling support
- add HorizontalPodAutoscaler reference to CRD
- create primary HPA on canary bootstrap
2018-10-11 11:16:56 +03:00
Stefan Prodan
a03cf43a1d Update CRD in readme and chart 2018-10-11 02:03:04 +03:00
Stefan Prodan
302de10fec Canary CRD refactoring
- set canaries.flagger.app version to v1alpha1
- replace old Canary spec with CanaryDeployment
2018-10-11 01:43:53 +03:00
Stefan Prodan
5a1412549d Merge pull request #1 from stefanprodan/crd-deployment
Add CanaryDeployment kind
2018-10-10 16:57:58 +03:00
Stefan Prodan
e2be4fdaed Add CanaryDeployment kind
- bootstrap the deployments, services and Istio virtual service
- use google/go-cmp to detect changes in the deployment pod spec
2018-10-10 16:57:12 +03:00
Stefan Prodan
3eb60a8447 Add alternative canary routing 2018-10-09 18:18:48 +03:00
Stefan Prodan
276fdfc0ff Rename dashboard 2018-10-09 18:18:04 +03:00
339 changed files with 20332 additions and 4045 deletions

8
.codecov.yml Normal file

@@ -0,0 +1,8 @@
coverage:
status:
project:
default:
target: auto
threshold: 50
base: auto
patch: off

1
.gitbook.yaml Normal file

@@ -0,0 +1 @@
root: ./docs/gitbook

17
.github/main.workflow vendored Normal file

@@ -0,0 +1,17 @@
workflow "Publish Helm charts" {
on = "push"
resolves = ["helm-push"]
}
action "helm-lint" {
uses = "stefanprodan/gh-actions/helm@master"
args = ["lint charts/*"]
}
action "helm-push" {
needs = ["helm-lint"]
uses = "stefanprodan/gh-actions/helm-gh-pages@master"
args = ["charts/*","https://flagger.app"]
secrets = ["GITHUB_TOKEN"]
}


@@ -2,7 +2,7 @@ sudo: required
language: go
go:
- 1.10.x
- 1.11.x
services:
- docker
@@ -14,25 +14,29 @@ addons:
script:
- set -e
- make test
- make test-fmt
- make test-codegen
- go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
- make build
after_success:
- if [ -z "$DOCKER_USER" ]; then
echo "PR build, skipping Docker Hub push";
echo "PR build, skipping image push";
else
docker tag stefanprodan/flagger:latest stefanprodan/flagger:${TRAVIS_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push stefanprodan/flagger:${TRAVIS_COMMIT};
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
fi
- if [ -z "$TRAVIS_TAG" ]; then
echo "Not a release, skipping Docker Hub push";
echo "Not a release, skipping image push";
else
docker tag stefanprodan/flagger:latest stefanprodan/flagger:$TRAVIS_TAG;
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push stefanprodan/flagger:latest;
docker push stefanprodan/flagger:$TRAVIS_TAG;
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
fi
- bash <(curl -s https://codecov.io/bash)
- rm coverage.txt
deploy:
- provider: script

72
CONTRIBUTING.md Normal file

@@ -0,0 +1,72 @@
# How to Contribute
Flagger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.
We gratefully welcome improvements to documentation as well as to code.
## Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.
## Chat
The project uses Slack: To join the conversation, simply join the
[Weave community](https://slack.weave.works/) Slack workspace.
## Getting Started
- Fork the repository on GitHub
- If you want to contribute as a developer, continue reading this document for further instructions
- If you have questions, concerns, get stuck or need a hand, let us know
on the Slack channel. We are happy to help and look forward to having
you as part of the team, in whatever capacity.
- Play with the project, submit bugs, submit pull requests!
## Contribution workflow
This is a rough outline of how to prepare a contribution:
- Create a topic branch from where you want to base your work (usually branched from master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- If you changed code:
- add automated tests to cover your changes
- Submit a pull request to the original repository.
## Acceptance policy
These things will make a PR more likely to be accepted:
- a well-described requirement
- new code and tests follow the conventions in old code and tests
- a good commit message (see below)
- All code must abide by [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
- Names should abide by [What's in a name](https://talks.golang.org/2014/names.slide#1)
- Code must build on both Linux and Darwin, via plain `go build`
- Code should have appropriate test coverage and tests should be written
to work with `go test`
In general, we will merge a PR once one maintainer has endorsed it.
For substantial changes, more people may become involved, and you might
get asked to resubmit the PR or divide the changes into more than one PR.
### Format of the Commit Message
For Flagger we prefer the following rules for good commit messages:
- Limit the subject to 50 characters and write as the continuation
of the sentence "If applied, this commit will ..."
- Explain what and why in the body, if more than a trivial change;
wrap it at 72 characters.
The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.
This doc is adapted from the [Weaveworks Flux](https://github.com/weaveworks/flux/blob/master/CONTRIBUTING.md)


@@ -1,4 +1,4 @@
FROM golang:1.10
FROM golang:1.11
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
@@ -13,17 +13,17 @@ RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
FROM alpine:3.8
RUN addgroup -S app \
&& adduser -S -g app app \
RUN addgroup -S flagger \
&& adduser -S -g flagger flagger \
&& apk --no-cache add ca-certificates
WORKDIR /home/app
WORKDIR /home/flagger
COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .
RUN chown -R app:app ./
RUN chown -R flagger:flagger ./
USER app
USER flagger
ENTRYPOINT ["./flagger"]

44
Dockerfile.loadtester Normal file

@@ -0,0 +1,44 @@
FROM golang:1.11 AS hey-builder
RUN mkdir -p /go/src/github.com/rakyll/hey/
WORKDIR /go/src/github.com/rakyll/hey
ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .
RUN tar xzf v0.1.1.tar.gz --strip 1
RUN go get ./...
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go install -ldflags '-w -extldflags "-static"' \
/go/src/github.com/rakyll/hey
FROM golang:1.11 AS builder
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
COPY . .
RUN go test -race ./pkg/loadtester/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*
FROM alpine:3.8
RUN addgroup -S app \
&& adduser -S -g app app \
&& apk --no-cache add ca-certificates curl
WORKDIR /home/app
COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .
RUN chown -R app:app ./
USER app
ENTRYPOINT ["./loadtester"]

73
Gopkg.lock generated

@@ -25,14 +25,6 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:ade392a843b2035effb4b4a2efa2c3bab3eb29b992e98bacf9c898b0ecb54e45"
name = "github.com/fatih/color"
packages = ["."]
pruneopts = "NUT"
revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
version = "v1.7.0"
[[projects]]
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
name = "github.com/ghodss/yaml"
@@ -92,10 +84,11 @@
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/cmpopts",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value",
@@ -170,7 +163,7 @@
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
digest = "1:555e31114bd0e89c6340c47ab73162e8c8d873e4d88914310923566f487bfcd5"
digest = "1:03a74b0d86021c8269b52b7c908eb9bb3852ff590b363dad0a807cf58cec2f89"
name = "github.com/knative/pkg"
packages = [
"apis",
@@ -182,31 +175,19 @@
"apis/istio/common/v1alpha1",
"apis/istio/v1alpha3",
"client/clientset/versioned",
"client/clientset/versioned/fake",
"client/clientset/versioned/scheme",
"client/clientset/versioned/typed/authentication/v1alpha1",
"client/clientset/versioned/typed/authentication/v1alpha1/fake",
"client/clientset/versioned/typed/duck/v1alpha1",
"client/clientset/versioned/typed/duck/v1alpha1/fake",
"client/clientset/versioned/typed/istio/v1alpha3",
"client/clientset/versioned/typed/istio/v1alpha3/fake",
"signals",
]
pruneopts = "NUT"
revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
[[projects]]
digest = "1:08c231ec84231a7e23d67e4b58f975e1423695a32467a362ee55a803f9de8061"
name = "github.com/mattn/go-colorable"
packages = ["."]
pruneopts = "NUT"
revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
version = "v0.0.9"
[[projects]]
digest = "1:bffa444ca07c69c599ae5876bc18b25bfd5fa85b297ca10a25594d284a7e9c5d"
name = "github.com/mattn/go-isatty"
packages = ["."]
pruneopts = "NUT"
revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c"
version = "v0.0.4"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
name = "github.com/matttproud/golang_protobuf_extensions"
@@ -547,42 +528,72 @@
version = "kubernetes-1.11.0"
[[projects]]
digest = "1:29e55bcff61dd3d1f768724450a3933ea76e6277684796eb7c315154f41db902"
digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
"kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/scheduling/v1beta1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
@@ -675,24 +686,29 @@
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/fatih/color",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/istio/glog",
"github.com/knative/pkg/apis/istio/v1alpha3",
"github.com/knative/pkg/client/clientset/versioned",
"github.com/knative/pkg/client/clientset/versioned/fake",
"github.com/knative/pkg/signals",
"github.com/prometheus/client_golang/prometheus/promhttp",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"k8s.io/api/apps/v1",
"k8s.io/api/autoscaling/v1",
"k8s.io/api/autoscaling/v2beta1",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/api/errors",
"k8s.io/apimachinery/pkg/api/resource",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/runtime/serializer",
"k8s.io/apimachinery/pkg/types",
"k8s.io/apimachinery/pkg/util/intstr",
"k8s.io/apimachinery/pkg/util/runtime",
"k8s.io/apimachinery/pkg/util/sets/types",
"k8s.io/apimachinery/pkg/util/wait",
@@ -700,6 +716,7 @@
"k8s.io/client-go/discovery",
"k8s.io/client-go/discovery/fake",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/kubernetes/scheme",
"k8s.io/client-go/kubernetes/typed/core/v1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2018 Weaveworks. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -3,14 +3,20 @@ VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | t
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
run:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -metrics-server=https://prometheus.istio.weavedx.com
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
-metrics-server=https://prometheus.iowa.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
build:
docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile
push:
docker push stefanprodan/flagger:$(TAG)
docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
docker push quay.io/stefanprodan/flagger:$(VERSION)
fmt:
gofmt -l -s -w $(SOURCE_DIRS)
@@ -25,12 +31,13 @@ test: test-fmt test-codegen
go test ./...
helm-package:
cd charts/ && helm package flagger/ && helm package podinfo-flagger/ && helm package grafana/
cd charts/ && helm package flagger/ && helm package grafana/ && helm package loadtester/
mv charts/*.tgz docs/
helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
helm-up:
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
helm upgrade --install flagger-grafana ./charts/grafana --namespace=istio-system
version-set:
@next="$(TAG)" && \
@@ -52,10 +59,10 @@ version-up:
dev-up: version-up
@echo "Starting build/push/deploy pipeline for $(VERSION)"
docker build -t stefanprodan/flagger:$(VERSION) . -f Dockerfile
docker push stefanprodan/flagger:$(VERSION)
docker build -t quay.io/stefanprodan/flagger:$(VERSION) . -f Dockerfile
docker push quay.io/stefanprodan/flagger:$(VERSION)
kubectl apply -f ./artifacts/flagger/crd.yaml
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
helm upgrade -i flagger ./charts/flagger --namespace=istio-system --set crd.create=false
release:
git tag $(VERSION)
@@ -68,3 +75,11 @@ release-set: fmt version-set helm-package
git tag $(VERSION)
git push origin $(VERSION)
reset-test:
kubectl delete -f ./artifacts/namespaces
kubectl apply -f ./artifacts/namespaces
kubectl apply -f ./artifacts/canaries
loadtester-push:
docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)

337
README.md

@@ -2,13 +2,14 @@
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![codecov](https://codecov.io/gh/stefanprodan/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The project is currently in experimental phase and it is expected that breaking changes
to the API will be made in the upcoming releases.
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
### Install
@@ -19,50 +20,50 @@ Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://stefanprodan.github.io/flagger
helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Usage
Flagger requires two Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/):
one for the version you want to upgrade called _primary_ and one for the _canary_.
Each deployment must have a corresponding ClusterIP [service](https://kubernetes.io/docs/concepts/services-networking/service/)
that exposes a port named http or https. These services are used as destinations in a Istio [virtual service](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService).
Flagger takes a Kubernetes deployment and creates a series of objects
(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-overview.png)
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Gated canary promotion stages:
* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt rollout if a rolling update is underway
* halt rollout if pods are unhealthy
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* check canary HTTP request success rate and latency
* halt rollout if any metric is under the specified threshold
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated (revision bump) and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt rollout while canary request success rate is under the threshold
* halt rollout while canary request duration P99 is over the threshold
* halt rollout if the primary or canary deployment becomes unhealthy
* halt rollout while canary deployment is being scaled up/down by HPA
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt rollout if pods are unhealthy
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
@@ -70,69 +71,41 @@ Gated canary promotion stages:
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
Assuming the primary deployment is named _podinfo_ and the canary one _podinfo-canary_, Flagger will require
a virtual service configured with weight-based routing:
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
spec:
hosts:
- podinfo
http:
- route:
- destination:
host: podinfo
port:
number: 9898
weight: 100
- destination:
host: podinfo-canary
port:
number: 9898
weight: 0
```
Primary and canary services should expose a port named http:
```yaml
apiVersion: v1
kind: Service
metadata:
name: podinfo-canary
spec:
type: ClusterIP
selector:
app: podinfo-canary
ports:
- name: http
port: 9898
targetPort: 9898
```
Based on the two deployments, services and virtual service, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1beta1
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetKind: Deployment
virtualService:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
primary:
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
host: podinfo
canary:
name: podinfo-canary
host: podinfo-canary
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
canaryAnalysis:
# max number of failed checks
# before rolling back the canary
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
@@ -140,6 +113,7 @@ spec:
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
@@ -150,7 +124,14 @@ spec:
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
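With the values above, a full promotion takes maxWeight / stepWeight = 50 / 5 = 10 steps, so at a 1m interval the analysis runs for roughly ten minutes before the canary is promoted, assuming no metric checks fail and no advancements are halted.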
The canary analysis uses the following PromQL queries:
@@ -196,9 +177,23 @@ histogram_quantile(0.99,
)
```
### Automated canary analysis, promotions and rollbacks
The canary analysis can be extended with webhooks.
Flagger will call the webhooks (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.
![flagger-canary](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
Webhook payload:
```json
{
"name": "podinfo",
"namespace": "test",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
}
```
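As a rough sketch of this contract, the webhook call can be reproduced with curl from inside the cluster; the URL and `cmd` metadata below are taken from the load-test webhook in the example spec above, and a non-2xx response (approximated here by curl's `-f` flag) would count as a failed check.
```bash
# simulate the HTTP POST Flagger issues for the load-test webhook
# (run from a pod inside the mesh, the service is only resolvable in-cluster)
curl -sf -X POST http://flagger-loadtester.test/ \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"cmd": "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"}}' \
  && echo "webhook check passed" \
  || echo "webhook check failed"
```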
### Automated canary analysis, promotions and rollbacks
Create a test namespace with Istio sidecar injection enabled:
@@ -208,66 +203,89 @@ export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create the primary deployment, service and hpa:
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/workloads/primary-deployment.yaml
kubectl apply -f ${REPO}/artifacts/workloads/primary-service.yaml
kubectl apply -f ${REPO}/artifacts/workloads/primary-hpa.yaml
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create the canary deployment, service and hpa:
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -f ${REPO}/artifacts/workloads/canary-deployment.yaml
kubectl apply -f ${REPO}/artifacts/workloads/canary-service.yaml
kubectl apply -f ${REPO}/artifacts/workloads/canary-hpa.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a virtual service (replace the Istio gateway and the internet domain with your own):
Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):
```bash
kubectl apply -f ${REPO}/artifacts/workloads/virtual-service.yaml
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
```
Create a canary promotion custom resource:
After a couple of seconds Flagger will create the canary objects:
```bash
kubectl apply -f ${REPO}/artifacts/rollouts/podinfo.yaml
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
Canary promotion output:
![flagger-canary-steps](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```
Flagger detects that the deployment revision changed and starts a new canary analysis:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16271121
Failed Checks: 6
State: finished
Canary Weight: 0
Failed Checks: 0
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Warning Synced 3m flagger Halt podinfo.test advancement request duration 2.525s > 500ms
Warning Synced 3m flagger Halt podinfo.test advancement request duration 1.567s > 500ms
Warning Synced 3m flagger Halt podinfo.test advancement request duration 823ms > 500ms
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Warning Synced 1m flagger Halt podinfo.test advancement success rate 82.33% < 99%
Warning Synced 1m flagger Halt podinfo.test advancement success rate 87.22% < 99%
Warning Synced 1m flagger Halt podinfo.test advancement success rate 94.74% < 99%
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo-canary.test template spec to podinfo.test
Warning Synced 15s flagger Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo-canary.test
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 5 2019-01-16T14:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
@@ -298,9 +316,10 @@ the canary is scaled to zero and the rollout is marked as failed.
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Canary Weight: 0
Failed Checks: 10
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -313,45 +332,8 @@ Events:
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo-canary.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo-canary.test
```
Trigger a new canary deployment by updating the canary image:
```bash
kubectl -n test set image deployment/podinfo-canary \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Steer detects that the canary revision changed and starts a new rollout:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo-canary.test old 17211012 new 17246876
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo-canary.test template spec to podinfo.test
Warning Synced 15s flagger Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo-canary.test
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Monitoring
@@ -388,18 +370,73 @@ Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo-canary.test template spec to podinfo-primary.test
Scaling down podinfo-canary.test
Promotion completed! podinfo-canary.test revision 81289
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
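For example, the status gauge can be pulled from the Prometheus HTTP API; the address below assumes the `metricsServer` value used at install time.
```bash
# query the last known canary status for the test namespace
# (0 - running, 1 - successful, 2 - failed)
curl -s 'http://prometheus.istio-system:9090/api/v1/query' \
  --data-urlencode 'query=flagger_canary_status{namespace="test"}'
```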
### Alerting
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis
reached the maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment failed:
```yaml
- alert: canary_rollback
expr: flagger_canary_status > 1
for: 1m
labels:
severity: warning
annotations:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
### Roadmap
* Extend the canary analysis and promotion to other types than Kubernetes deployments such as Flux Helm releases or OpenFaaS functions
* Extend the validation mechanism to support other metrics than HTTP success rate and latency
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two
* Alerting: Trigger Alertmanager on successful or failed promotions (Prometheus instrumentation of the canary analysis)
* Reporting: publish canary analysis results to Slack/Jira/etc
* Extend the canary analysis and promotion to other types than Kubernetes deployments such as Flux Helm releases or OpenFaaS functions
### Contributing


@@ -0,0 +1,58 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"


@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.3.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -4,47 +4,80 @@ metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1beta1
version: v1alpha3
versions:
- name: v1beta1
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
spec:
required:
- targetKind
- virtualService
- primary
- canary
- canaryAnalysis
- targetRef
- service
- canaryAnalysis
properties:
targetKind:
type: string
virtualService:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
primary:
autoscalerRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
host:
type: string
canary:
service:
type: object
required: ['port']
properties:
name:
type: string
host:
type: string
port:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
@@ -56,12 +89,27 @@ spec:
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
interval:
type: string
pattern: "^[0-9]+(m)"
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: stefanprodan/flagger:0.0.1
image: quay.io/stefanprodan/flagger:0.4.1
imagePullPolicy: Always
ports:
- name: http
@@ -41,6 +41,7 @@ spec:
- --timeout=2
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
@@ -50,6 +51,7 @@ spec:
- --timeout=2
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
limits:
memory: "512Mi"

View File

@@ -0,0 +1,60 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
selector:
matchLabels:
app: flagger-loadtester
template:
metadata:
labels:
app: flagger-loadtester
annotations:
prometheus.io/scrape: "true"
spec:
containers:
- name: loadtester
image: quay.io/stefanprodan/flagger-loadtester:0.1.0
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
command:
- ./loadtester
- -port=8080
- -log-level=info
- -timeout=1h
- -log-cmd-output=true
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
limits:
memory: "512Mi"
cpu: "1000m"
requests:
memory: "32Mi"
cpu: "10m"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
type: ClusterIP
selector:
app: flagger-loadtester
ports:
- name: http
port: 80
protocol: TCP
targetPort: http

View File

@@ -1,42 +0,0 @@
# monitor events: watch "kubectl -n test describe rollout/podinfo | sed -n 35,1000p"
# run tester: kubectl run -n test tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
# generate latency: watch curl http://podinfo-canary:9898/delay/1
# generate errors: watch curl http://podinfo-canary:9898/status/500
# run load test: kubectl run -n test -it --rm --restart=Never hey --image=stefanprodan/loadtest -- sh
# generate load: hey -z 2m -h2 -m POST -d '{test: 1}' -c 10 -q 5 http://podinfo:9898/api/echo
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetKind: Deployment
virtualService:
name: podinfo
primary:
name: podinfo
host: podinfo
canary:
name: podinfo-canary
host: podinfo-canary
canaryAnalysis:
# max number of failed metric checks
# before rolling back the canary
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s

View File

@@ -1,36 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfoc
namespace: test
spec:
targetKind: Deployment
virtualService:
name: podinfoc
primary:
name: podinfoc-primary
host: podinfoc-primary
canary:
name: podinfoc-canary
host: podinfoc-canary
canaryAnalysis:
# max number of failed metric checks
# before rolling back the canary
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s

View File

@@ -0,0 +1,34 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- match:
- headers:
user-agent:
regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 0
- destination:
host: podinfo
port:
number: 9898
weight: 100
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100

View File

@@ -0,0 +1,25 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
mirror:
host: podinfo
port:
number: 9898

View File

@@ -0,0 +1,26 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
- destination:
host: podinfo
port:
number: 9898
weight: 0

View File

@@ -1,10 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo-canary
name: podinfo
namespace: test
labels:
app: podinfo-canary
app: podinfo
spec:
replicas: 1
strategy:
@@ -13,13 +13,13 @@ spec:
type: RollingUpdate
selector:
matchLabels:
app: podinfo-canary
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo-canary
app: podinfo
spec:
containers:
- name: podinfod

View File

@@ -1,13 +1,13 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo-canary
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-canary
name: podinfo
minReplicas: 2
maxReplicas: 3
metrics:

View File

@@ -1,14 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo-canary
name: podinfo
namespace: test
labels:
app: podinfo-canary
app: podinfo
spec:
type: ClusterIP
selector:
app: podinfo-canary
app: podinfo
ports:
- name: http
port: 9898

View File

@@ -1,10 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
name: podinfo-primary
namespace: test
labels:
app: podinfo
app: podinfo-primary
spec:
replicas: 1
strategy:
@@ -13,13 +13,13 @@ spec:
type: RollingUpdate
selector:
matchLabels:
app: podinfo
app: podinfo-primary
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
app: podinfo-primary
spec:
containers:
- name: podinfod

View File

@@ -1,13 +1,13 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
name: podinfo-primary
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
name: podinfo-primary
minReplicas: 2
maxReplicas: 4
metrics:

View File

@@ -1,14 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
name: podinfo-primary
namespace: test
labels:
app: podinfo
app: podinfo-primary
spec:
type: ClusterIP
selector:
app: podinfo
app: podinfo-primary
ports:
- name: http
port: 9898

View File

@@ -8,18 +8,19 @@ metadata:
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- app.istio.weavedx.com
- podinfo.istio.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo
host: podinfo-primary
port:
number: 9898
weight: 100
- destination:
host: podinfo-canary
host: podinfo
port:
number: 9898
weight: 0

View File

@@ -1,6 +1,19 @@
apiVersion: v1
name: flagger
version: 0.0.1
appVersion: 0.0.1
version: 0.4.1
appVersion: 0.4.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://github.com/stefanprodan/flagger
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops

View File

@@ -1,14 +1,29 @@
# Flagger
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP request success rate, request average duration and pod health.
Based on the analysis of these KPIs, a canary is promoted or aborted, and the result is published to Slack.
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger`:
```console
$ helm upgrade --install flagger ./charts/flagger --namespace=istio-system
$ helm install --name flagger --namespace istio-system flagger/flagger
```
The command deploys Flagger on the Kubernetes cluster in the istio-system namespace.
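To verify the rollout, you can wait for the controller deployment to become ready (assuming the release is named `flagger`, so the deployment is also named `flagger`):
```console
kubectl -n istio-system rollout status deployment/flagger
```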
@@ -30,9 +45,15 @@ The following tables lists the configurable parameters of the Flagger chart and
Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `stefanprodan/flagger`
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`crd.create` | if `true`, create Flagger's CRDs | `true`
`resources.requests/cpu` | pod CPU request | `10m`
`resources.requests/memory` | pod memory request | `32Mi`
`resources.limits/cpu` | pod CPU limit | `1000m`
@@ -44,19 +65,20 @@ Parameter | Description | Default
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
```console
$ helm upgrade --install flagger ./charts/flagger \
--namespace=istio-system \
--set=image.tag=0.0.2
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm upgrade --install flagger ./charts/flagger \
--namespace=istio-system \
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
-f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)

View File

@@ -1,4 +1,10 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
@@ -25,8 +31,12 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
Create the name of the service account to use
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- define "flagger.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,9 +1,11 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "flagger.name" . }}
name: {{ template "flagger.serviceAccountName" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -5,47 +5,80 @@ metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1beta1
version: v1alpha3
versions:
- name: v1beta1
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
spec:
required:
- targetKind
- virtualService
- primary
- canary
- targetRef
- service
- canaryAnalysis
properties:
targetKind:
type: string
virtualService:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
primary:
autoscalerRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
host:
type: string
canary:
service:
type: object
required: ['port']
properties:
name:
type: string
host:
type: string
port:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
@@ -57,12 +90,28 @@ spec:
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
interval:
type: string
pattern: "^[0-9]+(m)"
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
{{- end }}

View File

@@ -3,25 +3,25 @@ kind: Deployment
metadata:
name: {{ include "flagger.fullname" . }}
labels:
app: {{ include "flagger.name" . }}
chart: {{ include "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: {{ include "flagger.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "flagger.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: flagger
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
containers:
- name: flagger
securityContext:
@@ -35,26 +35,32 @@ spec:
command:
- ./flagger
- -log-level=info
- -control-loop-interval={{ .Values.controlLoopInterval }}
- -metrics-server={{ .Values.metricsServer }}
{{- if .Values.slack.url }}
- -slack-url={{ .Values.slack.url }}
- -slack-user={{ .Values.slack.user }}
- -slack-channel={{ .Values.slack.channel }}
{{- end }}
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=2
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=2
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}

View File

@@ -4,10 +4,10 @@ kind: ClusterRole
metadata:
name: {{ template "flagger.fullname" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ['*']
resources: ['*']
@@ -20,16 +20,16 @@ kind: ClusterRoleBinding
metadata:
name: {{ template "flagger.fullname" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "flagger.fullname" . }}
subjects:
- name: {{ template "flagger.name" . }}
namespace: {{ .Release.Namespace | quote }}
- name: {{ template "flagger.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
kind: ServiceAccount
{{- end }}

View File

@@ -1,17 +1,30 @@
# Default values for flagger.
image:
repository: stefanprodan/flagger
tag: 0.0.1
repository: quay.io/stefanprodan/flagger
tag: 0.4.1
pullPolicy: IfNotPresent
controlLoopInterval: "10s"
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
crd:
slack:
user: flagger
channel:
# incoming webhook https://api.slack.com/incoming-webhooks
url:
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
crd:
# crd.create: `true` if custom resource definitions should be created
create: true
nameOverride: ""

View File

@@ -1,6 +1,13 @@
apiVersion: v1
name: grafana
version: 5.2.4
appVersion: 5.2.0
description: A Helm chart for monitoring progressive deployments powered by Istio and Flagger
home: https://github.com/stefanprodan/flagger
version: 0.1.0
appVersion: 5.4.2
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com

View File

@@ -1,16 +1,31 @@
# Weave Cloud Grafana
# Flagger Grafana
Grafana v5 with Kubernetes dashboards and Prometheus and Weave Cloud data sources.
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
To install the chart with the release name `my-release`:
Add Flagger Helm repository:
```console
$ helm install stable/grafana --name my-release \
--set service.type=NodePort \
--set token=WEAVE-TOKEN \
--set password=admin
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-grafana`:
```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The command deploys Grafana on the Kubernetes cluster in the istio-system namespace.
@@ -18,10 +33,10 @@ The [configuration](#configuration) section lists the parameters that can be con
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
To uninstall/delete the `flagger-grafana` deployment:
```console
$ helm delete --purge my-release
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
@@ -34,32 +49,31 @@ Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `5.0.1`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources` | pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `LoadBalancer`
`url` | Prometheus URL, used when Weave token is empty | `http://prometheus:9090`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `none`
`password` | Grafana admin password | `admin`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install stable/grafana --name my-release \
--set=token=WEAVE-TOKEN \
--set password=admin
helm install flagger/grafana --name flagger-grafana \
--set token=WEAVE-CLOUD-TOKEN
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install stable/grafana --name my-release -f values.yaml
helm install flagger/grafana --name flagger-grafana -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)

View File

@@ -21,7 +21,7 @@
"links": [],
"panels": [
{
"content": "<div class=\"dashboard-header text-center\">\n<span>RED: $primary.$namespace</span>\n</div>",
"content": "<div class=\"dashboard-header text-center\">\n<span>RED: $canary.$namespace</span>\n</div>",
"gridPos": {
"h": 3,
"w": 24,
@@ -568,7 +568,7 @@
]
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>USE: $primary.$namespace</span>\n</div>",
"content": "<div class=\"dashboard-header text-center\">\n<span>USE: $canary.$namespace</span>\n</div>",
"gridPos": {
"h": 3,
"w": 24,
@@ -712,7 +712,7 @@
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -890,7 +890,7 @@
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"format": "time_series",
"hide": false,
"interval": "",
@@ -1098,7 +1098,7 @@
"targets": [
{
"$$hashKey": "object:2598",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
@@ -1106,7 +1106,7 @@
},
{
"$$hashKey": "object:3245",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmited",
@@ -1153,7 +1153,7 @@
]
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>IN/OUTBOUND: $primary.$namespace</span>\n</div>",
"content": "<div class=\"dashboard-header text-center\">\n<span>IN/OUTBOUND: $canary.$namespace</span>\n</div>",
"gridPos": {
"h": 3,
"w": 24,
@@ -1653,7 +1653,7 @@
]
},
"timezone": "",
"title": "Flagger",
"title": "Canary analysis",
"uid": "RdykD7tiz",
"version": 2
}

View File

@@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: grafana/grafana
tag: 5.2.4
tag: 5.4.2
pullPolicy: IfNotPresent
service:

View File

@@ -19,3 +19,4 @@
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,20 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops
- load testing

View File

@@ -0,0 +1,78 @@
# Flagger load testing service
[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
## Installing the Chart
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-loadtester`:
```console
helm upgrade -i flagger-loadtester flagger/loadtester
```
The command deploys the load tester on the Kubernetes cluster in the default namespace.
> **Tip**: Note that the namespace where you deploy the load tester should have the Istio sidecar injection enabled
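For example, sidecar injection can be turned on for the target namespace before installing the chart (the namespace name is illustrative):
```console
kubectl create namespace test
kubectl label namespace test istio-injection=enabled
```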
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `flagger-loadtester` deployment:
```console
helm delete --purge flagger-loadtester
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the load tester chart and their default values.
Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install flagger/loadtester --name flagger-loadtester \
--set cmd.logOutput=false
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)

View File

@@ -0,0 +1 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/

View File

@@ -2,35 +2,31 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "podinfo-flagger.name" -}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
The release name is used as a full name.
If release name contains chart name it will be used as a full name.
*/}}
{{- define "podinfo-flagger.fullname" -}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "podinfo-flagger.primary" -}}
{{- printf "%s-%s" .Release.Name "primary" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "podinfo-flagger.canary" -}}
{{- printf "%s-%s" .Release.Name "canary" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "podinfo-flagger.chart" -}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ include "loadtester.name" . }}
template:
metadata:
labels:
app: {{ include "loadtester.name" . }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
command:
- ./loadtester
- -port=8080
- -log-level={{ .Values.logLevel }}
- -timeout={{ .Values.cmd.timeout }}
- -log-cmd-output={{ .Values.cmd.logOutput }}
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ include "loadtester.name" . }}

View File

@@ -0,0 +1,29 @@
replicaCount: 1
image:
repository: quay.io/stefanprodan/flagger-loadtester
tag: 0.1.0
pullPolicy: IfNotPresent
logLevel: info
cmd:
logOutput: true
timeout: 1h
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
resources:
requests:
cpu: 10m
memory: 64Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,12 +0,0 @@
apiVersion: v1
version: 2.0.0
appVersion: 1.2.1
engine: gotpl
name: podinfo-flagger
description: Podinfo Helm chart for Flagger progressive delivery
home: https://github.com/stefanprodan/k8s-podinfo
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
sources:
- https://github.com/stefanprodan/flagger

View File

@@ -1,56 +0,0 @@
# Podinfo Istio
Podinfo is a tiny web application made with Go
that showcases best practices of running microservices in Kubernetes.
## Installing the Chart
Create an Istio enabled namespace:
```console
kubectl create namespace test
kubectl label namespace test istio-injection=enabled
```
Create an Istio Gateway in the `istio-system` namespace named `public-gateway`:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
tls:
httpsRedirect: true
- port:
number: 443
name: https
protocol: HTTPS
hosts:
- "*"
tls:
mode: SIMPLE
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
Create the `frontend` release by specifying the external domain name:
```console
helm upgrade frontend -i ./charts/podinfo-flagger \
--namespace=test \
--set gateway.enabled=true \
--set gateway.host=podinfo.example.com
```

View File

@@ -1 +0,0 @@
{{ template "podinfo-flagger.fullname" . }} has been deployed successfully!

View File

@@ -1,63 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "podinfo-flagger.canary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
selector:
matchLabels:
app: {{ template "podinfo-flagger.canary" . }}
template:
metadata:
labels:
app: {{ template "podinfo-flagger.canary" . }}
annotations:
prometheus.io/scrape: "true"
spec:
terminationGracePeriodSeconds: 30
containers:
- name: podinfod
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.imagePullPolicy }}
command:
- ./podinfo
- --port={{ .Values.containerPort }}
- --level={{ .Values.logLevel }}
env:
- name: PODINFO_UI_COLOR
value: green
ports:
- name: http
containerPort: {{ .Values.containerPort }}
protocol: TCP
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:{{ .Values.containerPort }}/healthz
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:{{ .Values.containerPort }}/readyz
periodSeconds: 3
volumeMounts:
- name: data
mountPath: /data
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: data
emptyDir: {}

View File

@@ -1,21 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ template "podinfo-flagger.canary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "podinfo-flagger.canary" . }}
minReplicas: {{ .Values.hpa.minReplicas }}
maxReplicas: {{ .Values.hpa.maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.hpa.targetAverageUtilization }}

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "podinfo-flagger.canary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.containerPort }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "podinfo-flagger.canary" . }}

View File

@@ -1,63 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "podinfo-flagger.primary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
selector:
matchLabels:
app: {{ template "podinfo-flagger.primary" . }}
template:
metadata:
labels:
app: {{ template "podinfo-flagger.primary" . }}
annotations:
prometheus.io/scrape: "true"
spec:
terminationGracePeriodSeconds: 30
containers:
- name: podinfod
image: "quay.io/stefanprodan/podinfo:1.1.1"
imagePullPolicy: {{ .Values.imagePullPolicy }}
command:
- ./podinfo
- --port={{ .Values.containerPort }}
- --level={{ .Values.logLevel }}
env:
- name: PODINFO_UI_COLOR
value: blue
ports:
- name: http
containerPort: {{ .Values.containerPort }}
protocol: TCP
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:{{ .Values.containerPort }}/healthz
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:{{ .Values.containerPort }}/readyz
periodSeconds: 3
volumeMounts:
- name: data
mountPath: /data
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: data
emptyDir: {}

View File

@@ -1,21 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ template "podinfo-flagger.primary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "podinfo-flagger.primary" . }}
minReplicas: {{ .Values.hpa.minReplicas }}
maxReplicas: {{ .Values.hpa.maxReplicas }}
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.hpa.targetAverageUtilization }}

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "podinfo-flagger.primary" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.containerPort }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "podinfo-flagger.primary" . }}

View File

@@ -1,30 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: {{ template "podinfo-flagger.fullname" . }}
labels:
app: {{ template "podinfo-flagger.fullname" . }}
chart: {{ template "podinfo-flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
hosts:
- {{ template "podinfo-flagger.fullname" . }}
{{- if .Values.gateway.enabled }}
- {{ .Values.gateway.host }}
gateways:
- {{ .Values.gateway.name }}
{{- end }}
http:
- route:
- destination:
host: {{ template "podinfo-flagger.primary" . }}
port:
number: {{ .Values.containerPort }}
weight: 100
- destination:
host: {{ template "podinfo-flagger.canary" . }}
port:
number: {{ .Values.containerPort }}
weight: 0
timeout: {{ .Values.timeout }}

View File

@@ -1,30 +0,0 @@
# Default values for podinfo-flagger.
image:
repository: quay.io/stefanprodan/podinfo
tag: "1.2.0"
# enable the gateway when exposing the service outside the cluster
gateway:
enabled: false
name: public-gateway.istio-system.svc.cluster.local
# external domain name
host:
hpa:
minReplicas: 2
maxReplicas: 4
targetAverageUtilization: 99
timeout: 30s
logLevel: info
containerPort: 9898
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -6,12 +6,13 @@ import (
"time"
_ "github.com/istio/glog"
sharedclientset "github.com/knative/pkg/client/clientset/versioned"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
"github.com/knative/pkg/signals"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/controller"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/stefanprodan/flagger/pkg/notifier"
"github.com/stefanprodan/flagger/pkg/server"
"github.com/stefanprodan/flagger/pkg/version"
"k8s.io/client-go/kubernetes"
@@ -27,15 +28,21 @@ var (
controlLoopInterval time.Duration
logLevel string
port string
slackURL string
slackUser string
slackChannel string
)
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "wait interval between rollouts")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "8080", "Port to listen on.")
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
}
func main() {
@@ -59,18 +66,18 @@ func main() {
logger.Fatalf("Error building kubernetes clientset: %v", err)
}
sharedClient, err := sharedclientset.NewForConfig(cfg)
istioClient, err := istioclientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building shared clientset: %v", err)
logger.Fatalf("Error building istio clientset: %v", err)
}
rolloutClient, err := clientset.NewForConfig(cfg)
flaggerClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building example clientset: %s", err.Error())
}
rolloutInformerFactory := informers.NewSharedInformerFactory(rolloutClient, time.Second*30)
rolloutInformer := rolloutInformerFactory.Flagger().V1beta1().Canaries()
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
@@ -88,24 +95,35 @@ func main() {
logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
}
var slack *notifier.Slack
if slackURL != "" {
slack, err = notifier.NewSlack(slackURL, slackUser, slackChannel)
if err != nil {
logger.Errorf("Notifier %v", err)
} else {
logger.Infof("Slack notifications enabled for channel %s", slack.Channel)
}
}
// start HTTP server
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
c := controller.NewController(
kubeClient,
sharedClient,
rolloutClient,
rolloutInformer,
istioClient,
flaggerClient,
canaryInformer,
controlLoopInterval,
metricsServer,
logger,
slack,
)
rolloutInformerFactory.Start(stopCh)
flaggerInformerFactory.Start(stopCh)
logger.Info("Waiting for informer caches to sync")
for _, synced := range []cache.InformerSynced{
rolloutInformer.Informer().HasSynced,
canaryInformer.Informer().HasSynced,
} {
if ok := cache.WaitForCacheSync(stopCh, synced); !ok {
logger.Fatalf("Failed to wait for cache sync")

cmd/loadtester/main.go Normal file
View File

@@ -0,0 +1,44 @@
package main
import (
"flag"
"github.com/knative/pkg/signals"
"github.com/stefanprodan/flagger/pkg/loadtester"
"github.com/stefanprodan/flagger/pkg/logging"
"log"
"time"
)
var VERSION = "0.1.0"
var (
logLevel string
port string
timeout time.Duration
logCmdOutput bool
)
func init() {
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "9090", "Port to listen on.")
flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger(logLevel)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)
go taskRunner.Start(100*time.Millisecond, stopCh)
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
}

code-of-conduct.md Normal file
View File

@@ -0,0 +1,73 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting stefan.prodan(at)gmail.com.
All complaints will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of
an incident. Further details of specific enforcement policies may be
posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

View File

@@ -1 +0,0 @@
flagger.app

View File

@@ -1,413 +0,0 @@
# flagger
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The project is currently in an experimental phase and it is expected that breaking changes
to the API will be made in upcoming releases.
### Install
Before installing Flagger make sure you have Istio set up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://stefanprodan.github.io/flagger
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
```
Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
### Usage
Flagger requires two Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/):
one for the version you want to upgrade called _primary_ and one for the _canary_.
Each deployment must have a corresponding ClusterIP [service](https://kubernetes.io/docs/concepts/services-networking/service/)
that exposes a port named http or https. These services are used as destinations in an Istio [virtual service](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService).
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-overview.png)
Gated canary promotion stages:
* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt rollout if a rolling update is underway
* halt rollout if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* check canary HTTP request success rate and latency
* halt rollout if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated (revision bump) and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt rollout while canary request success rate is under the threshold
* halt rollout while canary request duration P99 is over the threshold
* halt rollout if the primary or canary deployment becomes unhealthy
* halt rollout while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt rollout if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated (revision bump) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.
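For illustration, the weight-stepping loop described in the stages above can be sketched as follows. This is a hypothetical simplification, not Flagger's actual scheduler; `checkMetrics`, `setWeight` and `promote` are made-up stand-ins for the real Istio and Prometheus interactions:
```go
package main

import "fmt"

// analysis mirrors the canaryAnalysis settings: stepWeight and maxWeight are
// percentages, threshold is the max number of failed checks before rollback.
type analysis struct {
	stepWeight, maxWeight, threshold int
}

// runCanary advances traffic to the canary in stepWeight increments, halting
// the advancement on failed checks and rolling back once the threshold is hit.
func runCanary(a analysis, checkMetrics func() bool, setWeight func(primary, canary int), promote func()) {
	failed := 0
	for weight := a.stepWeight; weight <= a.maxWeight; {
		if !checkMetrics() { // success rate or latency outside the threshold
			failed++
			if failed >= a.threshold {
				setWeight(100, 0) // route all traffic back to the primary
				fmt.Println("rollback: failed checks threshold reached")
				return
			}
			continue // halt advancement, re-check on the next interval
		}
		setWeight(100-weight, weight) // shift another stepWeight percent to the canary
		weight += a.stepWeight
	}
	promote()         // copy the canary spec template over the primary
	setWeight(100, 0) // route all traffic to the now-updated primary
	fmt.Println("promotion completed")
}

func main() {
	canaryWeight := 0
	runCanary(
		analysis{stepWeight: 5, maxWeight: 50, threshold: 10},
		func() bool { return true },         // pretend every metric check passes
		func(p, c int) { canaryWeight = c }, // record the current canary weight
		func() { fmt.Println("promoting canary to primary") },
	)
	fmt.Println("final canary weight:", canaryWeight)
}
```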
Assuming the primary deployment is named _podinfo_ and the canary one _podinfo-canary_, Flagger will require
a virtual service configured with weight-based routing:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
spec:
hosts:
- podinfo
http:
- route:
- destination:
host: podinfo
port:
number: 9898
weight: 100
- destination:
host: podinfo-canary
port:
number: 9898
weight: 0
```
Primary and canary services should expose a port named http:
```yaml
apiVersion: v1
kind: Service
metadata:
name: podinfo-canary
spec:
type: ClusterIP
selector:
app: podinfo-canary
ports:
- name: http
port: 9898
targetPort: 9898
```
Based on the two deployments, services and virtual service, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetKind: Deployment
virtualService:
name: podinfo
primary:
name: podinfo
host: podinfo
canary:
name: podinfo-canary
host: podinfo-canary
canaryAnalysis:
# max number of failed checks
# before rolling back the canary
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
```
The canary analysis uses the following PromQL queries:
_HTTP requests success rate percentage_
```promql
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload"
}[$interval]
)
)
```
_HTTP request duration P99 in milliseconds_
```promql
histogram_quantile(0.99,
sum(
irate(
istio_request_duration_seconds_bucket{
reporter="destination",
destination_workload=~"$workload",
destination_workload_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
### Automated canary analysis, promotions and rollbacks
![flagger-canary](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create the primary deployment, service and hpa:
```bash
kubectl apply -f ${REPO}/artifacts/workloads/primary-deployment.yaml
kubectl apply -f ${REPO}/artifacts/workloads/primary-service.yaml
kubectl apply -f ${REPO}/artifacts/workloads/primary-hpa.yaml
```
Create the canary deployment, service and hpa:
```bash
kubectl apply -f ${REPO}/artifacts/workloads/canary-deployment.yaml
kubectl apply -f ${REPO}/artifacts/workloads/canary-service.yaml
kubectl apply -f ${REPO}/artifacts/workloads/canary-hpa.yaml
```
Create a virtual service (replace the Istio gateway and the internet domain with your own):
```bash
kubectl apply -f ${REPO}/artifacts/workloads/virtual-service.yaml
```
Create a canary promotion custom resource:
```bash
kubectl apply -f ${REPO}/artifacts/rollouts/podinfo.yaml
```
Canary promotion output:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16271121
Failed Checks: 6
State: finished
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Warning Synced 3m flagger Halt podinfo.test advancement request duration 2.525s > 500ms
Warning Synced 3m flagger Halt podinfo.test advancement request duration 1.567s > 500ms
Warning Synced 3m flagger Halt podinfo.test advancement request duration 823ms > 500ms
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Warning Synced 1m flagger Halt podinfo.test advancement success rate 82.33% < 99%
Warning Synced 1m flagger Halt podinfo.test advancement success rate 87.22% < 99%
Warning Synced 1m flagger Halt podinfo.test advancement success rate 94.74% < 99%
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo-canary.test template spec to podinfo.test
Warning Synced 15s flagger Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo-canary.test
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo-canary.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo-canary.test
```
Trigger a new canary deployment by updating the canary image:
```bash
kubectl -n test set image deployment/podinfo-canary \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the canary revision changed and starts a new rollout:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo-canary.test old 17211012 new 17246876
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo-canary.test template spec to podinfo.test
Warning Synced 15s flagger Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo-canary.test
```
### Monitoring
Flagger comes with a Grafana dashboard made for canary analysis.
Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
The canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:
```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo-canary.test template spec to podinfo-primary.test
Scaling down podinfo-canary.test
Promotion completed! podinfo-canary.test revision 81289
```
### Roadmap
* Extend the canary analysis and promotion to workload types other than Kubernetes deployments, such as Flux Helm releases or OpenFaaS functions
* Extend the validation mechanism to support metrics other than HTTP success rate and latency
* Add support for comparing the canary metrics to the primary ones and validating based on the deviation between the two
* Alerting: Trigger Alertmanager on successful or failed promotions (Prometheus instrumentation of the canary analysis)
* Reporting: publish canary analysis results to Slack/Jira/etc
### Contributing
Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
When submitting bug reports, please include as many details as possible:
* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)

View File

@@ -1,64 +0,0 @@
title: Flagger - Istio Progressive Delivery Kubernetes Operator
remote_theme: errordeveloper/simple-project-homepage
repository: stefanprodan/flagger
by_weaveworks: true
url: "https://stefanprodan.github.io/flagger"
baseurl: "/"
twitter:
username: "stefanprodan"
author:
twitter: "stefanprodan"
# Set default og:image
defaults:
- scope: {path: ""}
values: {image: "logo/logo-flagger.png"}
# See: https://material.io/guidelines/style/color.html
# Use color-name-value, like pink-200 or deep-purple-100
brand_color: "amber-400"
# How article URLs are structured.
# See: https://jekyllrb.com/docs/permalinks/
permalink: posts/:title/
# "UA-NNNNNNNN-N"
google_analytics: ""
# Language. For example, if you write in Japanese, use "ja"
lang: "en"
# How many posts are visible on the home page without clicking "View More"
num_posts_visible_initially: 5
# Date format: See http://strftime.net/
date_format: "%b %-d, %Y"
plugins:
- jekyll-feed
- jekyll-readme-index
- jekyll-seo-tag
- jekyll-sitemap
- jemoji
# # required for local builds with starefossen/github-pages
# - jekyll-github-metadata
# - jekyll-mentions
# - jekyll-redirect-from
# - jekyll-remote-theme
exclude:
- CNAME
- Dockerfile
- Gopkg.lock
- Gopkg.toml
- LICENSE
- Makefile
- add-model.sh
- build
- cmd
- pkg
- tag_release.sh
- vendor

docs/gitbook/README.md Normal file
View File

@@ -0,0 +1,23 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
---
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on the **KPI** analysis, a canary is either promoted or aborted, and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/).

docs/gitbook/SUMMARY.md Normal file
View File

@@ -0,0 +1,17 @@
# Table of contents
* [Introduction](README.md)
* [How it works](how-it-works.md)
## Install
* [Install Flagger](install/install-flagger.md)
* [Install Grafana](install/install-grafana.md)
* [Install Istio](install/install-istio.md)
## Usage
* [Canary Deployments](usage/progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

View File

@@ -0,0 +1,361 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
# key-value pairs (optional)
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
```
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
spec:
selector:
matchLabels:
app: podinfo
template:
metadata:
labels:
app: podinfo
```
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
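For example, with `service.port: 9898` as above, the _podinfo_ deployment exposes the same port on its container
\(the container name and image follow the demo workloads and are shown only for illustration\):
```yaml
spec:
  template:
    spec:
      containers:
        - name: podinfod
          image: quay.io/stefanprodan/podinfo:1.4.0
          ports:
            - containerPort: 9898
```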
### Canary Deployment
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Gated canary promotion stages:
* scan for canary deployments
* create the primary deployment if needed
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% \(step weight\)
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated \(revision bump\) and start over
* increase the canary traffic weight by 5% \(step weight\) until it reaches 50% \(max weight\)
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* halt advancement if any of the webhook calls are failing
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark the canary deployment as finished
* wait for the canary deployment to be updated \(revision bump\) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.
### Canary Analysis
The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
Spec:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 2
```
The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
interval * (maxWeight / stepWeight)
```
And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:
```
interval * threshold
```
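For the spec above \(1m interval, max weight 50, step weight 2, threshold 10\) this works out to:
```
1m * (50 / 2) = 25 minutes  minimum time to validate and promote
1m * 10       = 10 minutes  time to roll back when the checks keep failing
```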
### HTTP Metrics
The canary analysis uses the following Prometheus queries:
**HTTP requests success rate percentage**
Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
```
Query:
```javascript
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload"
}[$interval]
)
)
```
**HTTP requests milliseconds duration P99**
Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
```
Query:
```javascript
histogram_quantile(0.99,
sum(
irate(
istio_request_duration_seconds_bucket{
reporter="destination",
destination_workload=~"$workload",
destination_workload_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
> **Note** that the metric interval should be less than or equal to the control loop interval.
### Webhooks
The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.
Spec:
```yaml
canaryAnalysis:
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 30s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
- name: load-tests
url: http://podinfo.test:9898/echo
timeout: 30s
metadata:
key1: "val1"
key2: "val2"
```
> **Note** that the sum of all webhook timeouts should be lower than the control loop interval.
Webhook payload (HTTP POST):
```json
{
"name": "podinfo",
"namespace": "test",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
}
```
Response status codes:
* 200-202 - advance canary by increasing the traffic weight
* timeout or non-2xx - halt advancement and increment failed checks
On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
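You can exercise a webhook endpoint by hand with the same payload Flagger sends. The example below targets the
`podinfo.test:9898/echo` URL from the spec above and should be run from a pod inside the mesh; a 2xx status code
means the check would pass:
```bash
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST http://podinfo.test:9898/echo \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"test": "all", "token": "16688eb5e9f289f1991c"}}'
```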
### Load Testing
For workloads that are not receiving constant traffic, Flagger can be configured with a webhook that,
when called, will start a load test for the target workload.
If the target workload doesn't receive any traffic during the canary analysis,
Flagger's metric checks will fail with "no values found for metric istio_requests_total".
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Or by using Helm:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```
Once deployed, the load tester API will be available at `http://flagger-loadtester.test/`.
Now you can add webhooks to the canary analysis spec:
```yaml
webhooks:
- name: load-test-get
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
```
When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.
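As a quick smoke test, you can post the same kind of payload to the load tester yourself from a pod in the `test`
namespace \(assuming it accepts the webhook payload Flagger sends; the `cmd` value is just an example\):
```bash
curl -s -X POST http://flagger-loadtester.test/ \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"cmd": "hey -z 10s -q 5 -c 1 http://podinfo.test:9898/"}}'
```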
If your workload is exposed outside the mesh with the Istio Gateway and TLS, you can point `hey` to the
public URL and use HTTP2.
```yaml
webhooks:
- name: load-test-get
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```
The load tester can run arbitrary commands as long as the binary is present in the container image.
For example, if you want to replace `hey` with another CLI, you can create your own Docker image:
```dockerfile
FROM quay.io/stefanprodan/flagger-loadtester:<VER>
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```
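The webhook for such an image would then reference your binary in the `cmd` field \(the `my-cli` name and its flags
are placeholders\):
```yaml
webhooks:
  - name: custom-load-test
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "my-cli --duration 1m --target http://podinfo.test:9898/"
```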

View File

@@ -0,0 +1,75 @@
# Install Flagger
Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled.
If you are new to Istio you can follow this GKE guide
[Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
**Prerequisites**
* Kubernetes &gt;= 1.11
* Istio &gt;= 1.0
* Prometheus &gt;= 2.6
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
Enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Install with kubectl
If you don't have Tiller you can use the helm template command and apply the generated yaml with kubectl:
```bash
# generate
helm template flagger/flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m > $HOME/flagger.yaml
# apply
kubectl apply -f $HOME/flagger.yaml
```
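Either way, you can verify the installation by checking the Flagger deployment and the Canary CRD:
```bash
kubectl -n istio-system get deployment/flagger
kubectl get crd canaries.flagger.app
```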
### Uninstall
To uninstall/delete the flagger release with Helm run:
```text
helm delete --purge flagger
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
> **Note** that on uninstall the Canary CRD will not be removed.
Deleting the CRD will make Kubernetes remove all the objects owned by Flagger, such as Istio virtual services,
Kubernetes deployments and ClusterIP services.
If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:
```text
kubectl delete crd canaries.flagger.app
```

View File

@@ -0,0 +1,48 @@
# Install Grafana
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
### Install with kubectl
If you don't have Tiller you can use the helm template command and apply the generated yaml with kubectl:
```bash
# generate
helm template flagger/grafana \
--name flagger-grafana \
--namespace=istio-system \
--set user=admin \
--set password=admin > $HOME/flagger-grafana.yaml
# apply
kubectl apply -f $HOME/flagger-grafana.yaml
```
### Uninstall
To uninstall/delete the Grafana release with Helm run:
```text
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.

View File

@@ -0,0 +1,460 @@
# Install Istio
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
Let's Encrypt TLS for the ingress gateway on Google Kubernetes Engine.
![Istio GKE diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-gcp-overview.png)
### Prerequisites
You will be creating a cluster on Google's Kubernetes Engine \(GKE\); if you don't have an account
you can sign up [here](https://cloud.google.com/free/) for free credits.
Log in to GCP, create a project and enable billing for it.
Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.
Set the default project \(replace `PROJECT_ID` with your own project\):
```text
gcloud config set project PROJECT_ID
```
Set the default compute region and zone:
```text
gcloud config set compute/region europe-west3
gcloud config set compute/zone europe-west3-a
```
Enable the Kubernetes and Cloud DNS services for your project:
```text
gcloud services enable container.googleapis.com
gcloud services enable dns.googleapis.com
```
Install the `kubectl` command-line tool:
```text
gcloud components install kubectl
```
Install the `helm` command-line tool:
```text
brew install kubernetes-helm
```
### GKE cluster setup
Create a cluster with three nodes using the latest Kubernetes version:
```bash
k8s_version=$(gcloud container get-server-config --format=json \
| jq -r '.validNodeVersions[0]')
gcloud container clusters create istio \
--cluster-version=${k8s_version} \
--zone=europe-west3-a \
--num-nodes=3 \
--machine-type=n1-highcpu-4 \
--preemptible \
--no-enable-cloud-logging \
--disk-size=30 \
--enable-autorepair \
--scopes=gke-default,compute-rw,storage-rw
```
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
after a maximum of 24 hours.
Set up credentials for `kubectl`:
```bash
gcloud container clusters get-credentials istio -z=europe-west3-a
```
Create a cluster admin role binding:
```bash
kubectl create clusterrolebinding "cluster-admin-$(whoami)" \
--clusterrole=cluster-admin \
--user="$(gcloud config get-value core/account)"
```
Validate your setup with:
```bash
kubectl get nodes -o wide
```
### Cloud DNS setup
You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
Create a managed zone named `istio` in Cloud DNS \(replace `example.com` with your domain\):
```bash
gcloud dns managed-zones create \
--dns-name="example.com." \
--description="Istio zone" "istio"
```
Look up your zone's name servers:
```bash
gcloud dns managed-zones describe istio
```
Update your registrar's name server records with the records returned by the above command.
Wait for the name servers to change \(replace `example.com` with your domain\):
```bash
watch dig +short NS example.com
```
Create a static IP address named `istio-gateway-ip` in the same region as your GKE cluster:
```bash
gcloud compute addresses create istio-gateway-ip --region europe-west3
```
Find the static IP address:
```bash
gcloud compute addresses describe istio-gateway-ip --region europe-west3
```
Create the following DNS records \(replace `example.com` with your domain and set your Istio Gateway IP\):
```bash
DOMAIN="example.com"
GATEWAYIP="35.198.98.90"
gcloud dns record-sets transaction start --zone=istio
gcloud dns record-sets transaction add --zone=istio \
--name="${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction execute --zone istio
```
Verify that the wildcard DNS is working \(replace `example.com` with your domain\):
```bash
watch host test.example.com
```
### Install Istio with Helm
Download the latest Istio release:
```bash
curl -L https://git.io/getLatestIstio | sh -
```
Navigate to the `istio-x.x.x` dir and copy the Istio CLI to your bin:
```bash
cd istio-x.x.x/
sudo cp ./bin/istioctl /usr/local/bin/istioctl
```
Apply the Istio CRDs:
```bash
kubectl apply -f ./install/kubernetes/helm/istio/templates/crds.yaml
```
Create a service account and a cluster role binding for Tiller:
```bash
kubectl apply -f ./install/kubernetes/helm/helm-service-account.yaml
```
Deploy Tiller in the `kube-system` namespace:
```bash
helm init --service-account tiller
```
Find the GKE IP ranges:
```bash
gcloud container clusters describe istio --zone=europe-west3-a \
| grep -e clusterIpv4Cidr -e servicesIpv4Cidr
```
You'll be using the IP ranges to allow unrestricted egress traffic for services running inside the service mesh.
Configure Istio with Prometheus, Jaeger, and cert-manager:
```yaml
global:
nodePort: false
proxy:
# replace with your GKE IP ranges
includeIPRanges: "10.28.0.0/14,10.7.240.0/20"
sidecarInjectorWebhook:
enabled: true
enableNamespacesByDefault: false
gateways:
enabled: true
istio-ingressgateway:
replicaCount: 2
autoscaleMin: 2
autoscaleMax: 3
# replace with your Istio Gateway IP
loadBalancerIP: "35.198.98.90"
type: LoadBalancer
pilot:
enabled: true
replicaCount: 1
autoscaleMin: 1
autoscaleMax: 1
resources:
requests:
cpu: 500m
memory: 1024Mi
grafana:
enabled: true
security:
enabled: true
adminUser: admin
# change the password
adminPassword: admin
prometheus:
enabled: true
servicegraph:
enabled: true
tracing:
enabled: true
jaeger:
tag: 1.7
certmanager:
enabled: true
```
Save the above file as `my-istio.yaml` and install Istio with Helm:
```bash
helm upgrade --install istio ./install/kubernetes/helm/istio \
--namespace=istio-system \
-f ./my-istio.yaml
```
Verify that Istio workloads are running:
```text
kubectl -n istio-system get pods
```
### Configure Istio Gateway with LE TLS
![Istio Let&apos;s Encrypt diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-cert-manager-gcp.png)
Create an Istio Gateway in the istio-system namespace with HTTPS redirect:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
tls:
httpsRedirect: true
- port:
number: 443
name: https
protocol: HTTPS
hosts:
- "*"
tls:
mode: SIMPLE
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
Save the above resource as istio-gateway.yaml and then apply it:
```text
kubectl apply -f ./istio-gateway.yaml
```
Create a service account with Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):
```bash
GCP_PROJECT=my-gcp-project
gcloud iam service-accounts create dns-admin \
--display-name=dns-admin \
--project=${GCP_PROJECT}
gcloud iam service-accounts keys create ./gcp-dns-admin.json \
--iam-account=dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--project=${GCP_PROJECT}
gcloud projects add-iam-policy-binding ${GCP_PROJECT} \
--member=serviceAccount:dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--role=roles/dns.admin
```
Create a Kubernetes secret with the GCP Cloud DNS admin key:
```bash
kubectl create secret generic cert-manager-credentials \
--from-file=./gcp-dns-admin.json \
--namespace=istio-system
```
Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and
`my-gcp-project` with your project ID\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: letsencrypt-prod
namespace: istio-system
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: email@example.com
privateKeySecretRef:
name: letsencrypt-prod
dns01:
providers:
- name: cloud-dns
clouddns:
serviceAccountSecretRef:
name: cert-manager-credentials
key: gcp-dns-admin.json
project: my-gcp-project
```
Save the above resource as letsencrypt-issuer.yaml and then apply it:
```text
kubectl apply -f ./letsencrypt-issuer.yaml
```
Create a wildcard certificate \(replace `example.com` with your domain\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: istio-gateway
namespace: istio-system
spec:
secretName: istio-ingressgateway-certs
issuerRef:
name: letsencrypt-prod
commonName: "*.example.com"
acme:
config:
- dns01:
provider: cloud-dns
domains:
- "*.example.com"
- "example.com"
```
Save the above resource as of-cert.yaml and then apply it:
```text
kubectl apply -f ./of-cert.yaml
```
In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:
```text
kubectl -n istio-system logs deployment/certmanager -f
Certificate issued successfully
Certificate istio-system/istio-gateway scheduled for renewal in 1438 hours
```
Recreate Istio ingress gateway pods:
```bash
kubectl -n istio-system delete pods -l istio=ingressgateway
```
Note that the Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal.
Since the GKE cluster is made out of preemptible VMs, the gateway pods will be replaced once every 24h.
If you're not using preemptible nodes, you need to manually kill the gateway pods every two months
before the certificate expires.
### Expose services outside the service mesh
In order to expose services via the Istio Gateway you have to create a Virtual Service attached to the gateway.
Create a virtual service in `istio-system` namespace for Grafana \(replace `example.com` with your domain\):
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: grafana
namespace: istio-system
spec:
hosts:
- "grafana.example.com"
gateways:
- public-gateway.istio-system.svc.cluster.local
http:
- route:
- destination:
host: grafana
timeout: 30s
```
Save the above resource as grafana-virtual-service.yaml and then apply it:
```bash
kubectl apply -f ./grafana-virtual-service.yaml
```
Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.
Check that HTTP2 is enabled:
```bash
curl -I --http2 https://grafana.example.com
HTTP/2 200
content-type: text/html; charset=UTF-8
x-envoy-upstream-service-time: 3
server: envoy
```

View File

@@ -0,0 +1,39 @@
# Alerting
### Slack
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the
maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
### Prometheus Alert Manager
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:
```yaml
- alert: canary_rollback
expr: flagger_canary_status > 1
for: 1m
labels:
severity: warning
annotations:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```

View File

@@ -0,0 +1,69 @@
# Monitoring
### Grafana
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
### Logging
The canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:
```text
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
### Metrics
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```

View File

@@ -0,0 +1,208 @@
# Canary Deployments
This guide shows you how to use Istio and Flagger to automate canary deployments.
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-01-16T14:05:07Z
prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```


View File

@@ -1,61 +0,0 @@
apiVersion: v1
entries:
flagger:
- apiVersion: v1
appVersion: 0.0.1
created: 2018-10-07T13:00:10.107115+03:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: ddf0eda9975979bdfc26f10c7b3c15f4a31ebe0484be3285fbaec99b893871be
home: https://github.com/stefanprodan/flagger
name: flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.0.1.tgz
version: 0.0.1
grafana:
- apiVersion: v1
appVersion: 5.2.0
created: 2018-10-07T13:00:10.10763+03:00
description: A Helm chart for monitoring progressive deployments powered by Istio
and Flagger
digest: 1e19e9010d2939e19d0f8465287676b185f4772f5df5a75092a8fce0b4a50e5e
home: https://github.com/stefanprodan/flagger
name: grafana
urls:
- https://stefanprodan.github.io/flagger/grafana-5.2.4.tgz
version: 5.2.4
podinfo-flagger:
- apiVersion: v1
appVersion: 1.2.1
created: 2018-10-07T13:00:10.108001+03:00
description: Podinfo Helm chart for Flagger progressive delivery
digest: 35ce41d10df123a785ba359491668a143c5918ce6d3d24f5b545ca2cb9134d91
engine: gotpl
home: https://github.com/stefanprodan/k8s-podinfo
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
name: podinfo-flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/podinfo-flagger-2.0.0.tgz
version: 2.0.0
- apiVersion: v1
appVersion: 1.2.1
created: 2018-10-04T14:48:57.586403+03:00
description: Podinfo Helm chart for Flagger progressive delivery
digest: f559f387aa45005be085af207b8b4c91776e489fcb6ca7e60f20913ecd21184e
engine: gotpl
home: https://github.com/stefanprodan/k8s-podinfo
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
name: podinfo-flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/podinfo-flagger-1.2.1.tgz
version: 1.2.1
generated: 2018-10-07T13:00:10.106238+03:00

BIN docs/logo/flagger-horiz.jpg Normal file (19 KiB, binary file not shown)
BIN docs/logo/flagger-horiz.png Normal file (18 KiB, binary file not shown)
BIN docs/logo/flagger-icon.jpg Normal file (30 KiB, binary file not shown)
BIN docs/logo/flagger-icon.png Normal file (28 KiB, binary file not shown)
BIN docs/logo/flagger-vert.jpg Normal file (31 KiB, binary file not shown)
BIN docs/logo/flagger-vert.png Normal file (28 KiB, binary file not shown)
Binary file not shown (deleted, 34 KiB)
View File

@@ -1,20 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="512px" height="534px" viewBox="0 0 512 534" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 51.2 (57519) - http://www.bohemiancoding.com/sketch -->
<title>istio-refresh</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Logo" transform="translate(-82.000000, -67.000000)" fill-rule="nonzero">
<g id="istio-refresh">
<polygon id="hull" fill="#466BB0" points="467.486761 456 322.182535 499.591268 235 456"></polygon>
<polygon id="mainsail" fill="#466BB0" points="322.182535 426.834648 322.182535 267 235 441.365071"></polygon>
<polygon id="headsail" fill="#466BB0" points="466.773803 441.608451 336 151 336 427.078028"></polygon>
<g id="arrows" transform="translate(338.000000, 338.000000) rotate(-24.000000) translate(-338.000000, -338.000000) translate(82.000000, 82.000000)">
<path d="M256.000042,0 C114.772094,0 0.00128552135,114.771366 0.00128552135,256 C-0.0639978677,260.61614 2.36127785,264.909841 6.34842716,267.236963 C10.3355765,269.564084 15.2668702,269.564084 19.2540195,267.236963 C23.2411688,264.909841 25.6664445,260.61614 25.6011612,256 C25.6011612,128.607834 128.608494,25.6 256.000042,25.6 C335.938457,25.6 406.118967,66.3034496 447.449109,128 L413.301505,128 C408.685388,127.934716 404.391707,130.360004 402.064597,134.347172 C399.737487,138.334341 399.737487,143.265659 402.064597,147.252828 C404.391707,151.239996 408.685388,153.665284 413.301505,153.6 L470.549003,153.6 L486.398923,153.6 L486.398923,70.7176471 C486.446301,67.257803 485.091067,63.9261384 482.642017,61.4817859 C480.192966,59.0374333 476.858715,57.6886086 473.398986,57.7426467 C466.339613,57.8529965 460.702373,63.6580397 460.799047,70.7176471 L460.799047,102.525005 C414.068394,40.3001766 339.69529,0 256.000042,0 Z" id="Shape" fill="#35A2EE"></path>
<path d="M498.998861,243.024998 C491.939488,243.135348 486.302248,248.940392 486.398923,256 C486.398923,383.392166 383.391589,486.4 256.000042,486.4 C176.054933,486.4 105.870698,445.69303 64.5509751,384 L92.9338727,384.024998 C97.54999,384.090282 101.843671,381.664995 104.170781,377.677826 C106.497891,373.690657 106.497891,368.75934 104.170781,364.772171 C101.843671,360.785002 97.54999,358.359715 92.9338727,358.424998 L44.1010713,358.4 C42.4667939,358.077951 40.7853733,358.077951 39.151096,358.4 L25.6011612,358.4 L25.6011612,432.243137 C25.5358778,436.859277 27.9611535,441.152979 31.9483028,443.4801 C35.9354521,445.807221 40.8667458,445.807221 44.8538951,443.4801 C48.8410445,441.152979 51.2663202,436.859277 51.2010368,432.243137 L51.2010368,409.424998 C97.9168162,471.668595 172.282881,512 256.000042,512 C397.227989,512 511.998798,397.228634 511.998798,256 C512.046177,252.540156 510.690943,249.208491 508.241893,246.764138 C505.792842,244.319785 502.458591,242.97096 498.998861,243.024998 Z" id="Path" fill="#1DCBA2"></path>
</g>
</g>
</g>
</g>
</svg>


View File

@@ -23,6 +23,6 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
flagger:v1beta1 \
flagger:v1alpha3 \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -16,6 +16,6 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// Package v1beta1 is the v1beta1 version of the API.
// Package v1alpha3 is the v1alpha3 version of the API.
// +groupName=flagger.app
package v1beta1
package v1alpha3

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
package v1alpha3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,7 +25,7 @@ import (
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1beta1"}
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha3"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {

View File

@@ -0,0 +1,163 @@
/*
Copyright 2018 The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"time"
)
const (
CanaryKind = "Canary"
ProgressDeadlineSeconds = 600
AnalysisInterval = 60 * time.Second
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Canary is a specification for a Canary resource
type Canary struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CanarySpec `json:"spec"`
Status CanaryStatus `json:"status"`
}
// CanarySpec is the spec for a Canary resource
type CanarySpec struct {
// reference to target resource
TargetRef hpav1.CrossVersionObjectReference `json:"targetRef"`
// reference to autoscaling resource
// +optional
AutoscalerRef *hpav1.CrossVersionObjectReference `json:"autoscalerRef,omitempty"`
// virtual service spec
Service CanaryService `json:"service"`
// metrics and thresholds
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
// the maximum time in seconds for a canary deployment to make progress
// before it is considered to be failed. Defaults to ten minutes.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CanaryList is a list of Canary resources
type CanaryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Canary `json:"items"`
}
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string
const (
// CanaryInitialized means the primary deployment, hpa and ClusterIP services
// have been created along with the Istio virtual service
CanaryInitialized CanaryPhase = "Initialized"
// CanaryProgressing means the canary analysis is underway
CanaryProgressing CanaryPhase = "Progressing"
// CanarySucceeded means the canary analysis has been successful
// and the canary deployment has been promoted
CanarySucceeded CanaryPhase = "Succeeded"
// CanaryFailed means the canary analysis failed
// and the canary deployment has been scaled to zero
CanaryFailed CanaryPhase = "Failed"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
// +optional
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
type CanaryService struct {
Port int32 `json:"port"`
Gateways []string `json:"gateways"`
Hosts []string `json:"hosts"`
}
// CanaryAnalysis is used to describe how the analysis should be done
type CanaryAnalysis struct {
Interval string `json:"interval"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Webhooks []CanaryWebhook `json:"webhooks,omitempty"`
}
// CanaryMetric holds the reference to Istio metrics used for canary analysis
type CanaryMetric struct {
Name string `json:"name"`
Interval string `json:"interval"`
Threshold int `json:"threshold"`
}
// CanaryWebhook holds the reference to external checks used for canary analysis
type CanaryWebhook struct {
Name string `json:"name"`
URL string `json:"url"`
Timeout string `json:"timeout"`
// +optional
Metadata *map[string]string `json:"metadata,omitempty"`
}
// CanaryWebhookPayload holds the deployment info and metadata sent to webhooks
type CanaryWebhookPayload struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
Metadata map[string]string `json:"metadata,omitempty"`
}
// GetProgressDeadlineSeconds returns the progress deadline (default 600s)
func (c *Canary) GetProgressDeadlineSeconds() int {
if c.Spec.ProgressDeadlineSeconds != nil {
return int(*c.Spec.ProgressDeadlineSeconds)
}
return ProgressDeadlineSeconds
}
// GetAnalysisInterval returns the canary analysis interval (default 60s)
func (c *Canary) GetAnalysisInterval() time.Duration {
if c.Spec.CanaryAnalysis.Interval == "" {
return AnalysisInterval
}
interval, err := time.ParseDuration(c.Spec.CanaryAnalysis.Interval)
if err != nil {
return AnalysisInterval
}
return interval
}

View File

@@ -18,9 +18,10 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
package v1alpha3
import (
v1 "k8s.io/api/autoscaling/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -30,7 +31,7 @@ func (in *Canary) DeepCopyInto(out *Canary) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
return
}
@@ -57,9 +58,16 @@ func (in *CanaryAnalysis) DeepCopyInto(out *CanaryAnalysis) {
*out = *in
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = make([]Metric, len(*in))
*out = make([]CanaryMetric, len(*in))
copy(*out, *in)
}
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]CanaryWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -106,13 +114,64 @@ func (in *CanaryList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryMetric) DeepCopyInto(out *CanaryMetric) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryMetric.
func (in *CanaryMetric) DeepCopy() *CanaryMetric {
if in == nil {
return nil
}
out := new(CanaryMetric)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryService) DeepCopyInto(out *CanaryService) {
*out = *in
if in.Gateways != nil {
in, out := &in.Gateways, &out.Gateways
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryService.
func (in *CanaryService) DeepCopy() *CanaryService {
if in == nil {
return nil
}
out := new(CanaryService)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
*out = *in
out.Primary = in.Primary
out.Canary = in.Canary
out.TargetRef = in.TargetRef
if in.AutoscalerRef != nil {
in, out := &in.AutoscalerRef, &out.AutoscalerRef
*out = new(v1.CrossVersionObjectReference)
**out = **in
}
in.Service.DeepCopyInto(&out.Service)
in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
out.VirtualService = in.VirtualService
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
return
}
@@ -129,6 +188,7 @@ func (in *CanarySpec) DeepCopy() *CanarySpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryStatus) DeepCopyInto(out *CanaryStatus) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
@@ -143,49 +203,51 @@ func (in *CanaryStatus) DeepCopy() *CanaryStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Metric) DeepCopyInto(out *Metric) {
+func (in *CanaryWebhook) DeepCopyInto(out *CanaryWebhook) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metric.
-func (in *Metric) DeepCopy() *Metric {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryWebhook.
+func (in *CanaryWebhook) DeepCopy() *CanaryWebhook {
if in == nil {
return nil
}
-out := new(Metric)
+out := new(CanaryWebhook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Target) DeepCopyInto(out *Target) {
+func (in *CanaryWebhookPayload) DeepCopyInto(out *CanaryWebhookPayload) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target.
-func (in *Target) DeepCopy() *Target {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryWebhookPayload.
+func (in *CanaryWebhookPayload) DeepCopy() *CanaryWebhookPayload {
if in == nil {
return nil
}
-out := new(Target)
-in.DeepCopyInto(out)
-return out
-}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VirtualService) DeepCopyInto(out *VirtualService) {
-*out = *in
-return
-}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService.
-func (in *VirtualService) DeepCopy() *VirtualService {
-if in == nil {
-return nil
-}
-out := new(VirtualService)
+out := new(CanaryWebhookPayload)
in.DeepCopyInto(out)
return out
}
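
The part of the regenerated deepcopy code worth pausing on is CanaryWebhook.Metadata: because it is a *map[string]string, a plain struct assignment would leave the original and the copy sharing one map. The generated DeepCopyInto above re-allocates the map and copies its entries; the standalone sketch below shows the same pattern on a made-up webhook stand-in (none of these identifiers are Flagger's).

package main

import "fmt"

// Stand-in for CanaryWebhook: only the field relevant to the deep copy
// shown above (a pointer to a map) is kept.
type webhook struct {
	Name     string
	Metadata *map[string]string
}

// deepCopy mirrors the generated CanaryWebhook.DeepCopyInto above: the map
// itself is re-allocated and its entries copied, so the clone never aliases
// the original's metadata.
func (in *webhook) deepCopy() *webhook {
	out := *in
	if in.Metadata != nil {
		m := make(map[string]string, len(*in.Metadata))
		for k, v := range *in.Metadata {
			m[k] = v
		}
		out.Metadata = &m
	}
	return &out
}

func main() {
	meta := map[string]string{"cmd": "run-load-test"} // made-up value
	orig := &webhook{Name: "load-test", Metadata: &meta}

	clone := orig.deepCopy()
	(*clone.Metadata)["cmd"] = "changed"

	// With a shallow copy (out := *in alone) this would print "changed",
	// because both structs would point at the same map.
	fmt.Println((*orig.Metadata)["cmd"]) // run-load-test
}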

View File

@@ -1,81 +0,0 @@
/*
Copyright 2018 The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Canary is a specification for a Canary resource
type Canary struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CanarySpec `json:"spec"`
Status CanaryStatus `json:"status"`
}
// CanarySpec is the spec for a Canary resource
type CanarySpec struct {
TargetKind string `json:"targetKind"`
Primary Target `json:"primary"`
Canary Target `json:"canary"`
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
VirtualService VirtualService `json:"virtualService"`
}
type Target struct {
Name string `json:"name"`
Host string `json:"host"`
}
type VirtualService struct {
Name string `json:"name"`
}
type CanaryAnalysis struct {
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []Metric `json:"metrics"`
}
type Metric struct {
Name string `json:"name"`
Interval string `json:"interval"`
Threshold int `json:"threshold"`
}
// CanaryStatus is the status for a Canary resource
type CanaryStatus struct {
State string `json:"state"`
CanaryRevision string `json:"canaryRevision"`
FailedChecks int `json:"failedChecks"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CanaryList is a list of Canary resources
type CanaryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Canary `json:"items"`
}
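
Relative to these deleted v1beta1 types, the v1alpha3 CanaryStatus shown earlier keeps FailedChecks, replaces the free-form State string with the typed Phase, swaps CanaryRevision for LastAppliedSpec, and adds CanaryWeight and LastTransitionTime. The sketch below only spells out that rough field correspondence with local stand-in structs; it is an illustration, not a converter that exists in Flagger, and the placeholder values are invented.

package main

import "fmt"

// Local stand-ins for the deleted v1beta1 status and the v1alpha3 status
// shown earlier in this diff; not the real Flagger types.
type oldStatus struct {
	State          string
	CanaryRevision string
	FailedChecks   int
}

type newStatus struct {
	Phase           string // CanaryPhase in v1alpha3
	FailedChecks    int
	CanaryWeight    int
	LastAppliedSpec string
}

// toNewStatus spells out the rough field correspondence between the two
// versions. The old State strings and their mapping to the CanaryPhase
// constants are not shown in this diff, so Phase is left as a placeholder.
func toNewStatus(o oldStatus) newStatus {
	return newStatus{
		Phase:           "Progressing",
		FailedChecks:    o.FailedChecks,
		LastAppliedSpec: o.CanaryRevision,
	}
}

func main() {
	// Invented values, purely for illustration.
	fmt.Printf("%+v\n", toNewStatus(oldStatus{State: "failed", CanaryRevision: "6d7b9c", FailedChecks: 3}))
}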

View File

@@ -19,7 +19,7 @@ limitations under the License.
package versioned
import (
flaggerv1beta1 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1beta1"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -27,27 +27,27 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
-FlaggerV1beta1() flaggerv1beta1.FlaggerV1beta1Interface
+FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
-Flagger() flaggerv1beta1.FlaggerV1beta1Interface
+Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
-flaggerV1beta1 *flaggerv1beta1.FlaggerV1beta1Client
+flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
}
-// FlaggerV1beta1 retrieves the FlaggerV1beta1Client
-func (c *Clientset) FlaggerV1beta1() flaggerv1beta1.FlaggerV1beta1Interface {
-return c.flaggerV1beta1
+// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
+func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
+return c.flaggerV1alpha3
}
// Deprecated: Flagger retrieves the default version of FlaggerClient.
// Please explicitly pick a version.
-func (c *Clientset) Flagger() flaggerv1beta1.FlaggerV1beta1Interface {
-return c.flaggerV1beta1
+func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
+return c.flaggerV1alpha3
}
// Discovery retrieves the DiscoveryClient
@@ -66,7 +66,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
}
var cs Clientset
var err error
-cs.flaggerV1beta1, err = flaggerv1beta1.NewForConfig(&configShallowCopy)
+cs.flaggerV1alpha3, err = flaggerv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
@@ -82,7 +82,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
-cs.flaggerV1beta1 = flaggerv1beta1.NewForConfigOrDie(c)
+cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -91,7 +91,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
-cs.flaggerV1beta1 = flaggerv1beta1.New(c)
+cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
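
For callers of the generated clientset, the practical effect of this file is that FlaggerV1beta1() and the deprecated Flagger() accessor become FlaggerV1alpha3(). A hedged usage sketch follows; the Canaries() typed client, its List signature and the test namespace are assumptions based on the usual client-gen output for this era of client-go, not code shown in this diff.

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
)

func main() {
	// Build a rest.Config from the local kubeconfig; inside a cluster one
	// would use rest.InClusterConfig instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}

	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// FlaggerV1alpha3 replaces the old FlaggerV1beta1 accessor; Canaries()
	// is assumed to be the generated typed client for the Canary resource.
	canaries, err := cs.FlaggerV1alpha3().Canaries("test").List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range canaries.Items {
		fmt.Printf("%s/%s phase=%s weight=%d\n",
			c.Namespace, c.Name, c.Status.Phase, c.Status.CanaryWeight)
	}
}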

Some files were not shown because too many files have changed in this diff