Compare commits


183 Commits

Author SHA1 Message Date
Stefan Prodan
fbdf38e990 Merge pull request #124 from weaveworks/release-v0.10.0
Release v0.10.0 (AWS App Mesh edition)
2019-03-27 14:20:50 +02:00
stefanprodan
ef5bf70386 Update go to 1.12 2019-03-27 13:02:30 +02:00
stefanprodan
274c1469b4 Update changelog v0.10.0 2019-03-27 09:46:06 +02:00
stefanprodan
960d506360 Upgrade mesh definition to v1beta1 2019-03-27 09:40:11 +02:00
stefanprodan
79a6421178 Move load tester to Weaveworks Quay 2019-03-27 09:35:34 +02:00
Stefan Prodan
8b5c004860 Merge pull request #123 from weaveworks/appmesh-v1beta1
Update App Mesh to v1beta1
2019-03-26 21:53:34 +02:00
stefanprodan
f54768772e Fix App Mesh success rate graph 2019-03-26 21:30:18 +02:00
stefanprodan
b9075dc6f9 Update App Mesh to v1beta1 2019-03-26 20:29:40 +02:00
Stefan Prodan
107596ad54 Merge pull request #122 from weaveworks/prep-0.10.0
Reconcile ClusterIP services and prep v0.10.0
2019-03-26 17:25:57 +02:00
stefanprodan
3c6a2b1508 Update changelog for v0.10.0 2019-03-26 17:12:46 +02:00
stefanprodan
f996cba354 Set Grafana dashboards readable IDs 2019-03-26 17:12:46 +02:00
stefanprodan
bdd864fbdd Add port and mesh name to CRD validation 2019-03-26 17:12:46 +02:00
stefanprodan
ca074ef13f Rename router sync to reconcile 2019-03-26 17:12:46 +02:00
stefanprodan
ddd3a8251e Reconcile ClusterIP services
- add svc update tests fix: #114
2019-03-26 17:12:46 +02:00
stefanprodan
f5b862dc1b Add App Mesh docs to readme 2019-03-26 17:12:45 +02:00
stefanprodan
d45d475f61 Add Grafana dashboard link and update screen 2019-03-26 17:12:45 +02:00
stefanprodan
d0f72ea3fa Bump Flagger version to 0.10.0 2019-03-26 17:12:45 +02:00
stefanprodan
5ed5d1e5b6 Fix load tester install command for zsh 2019-03-26 11:57:01 +02:00
stefanprodan
311b14026e Release loadtester chart v0.2.0 2019-03-26 11:33:07 +02:00
stefanprodan
67cd722b54 Release Grafana chart v1.1.0 2019-03-26 11:16:42 +02:00
stefanprodan
7f6247eb7b Add jq requirement to App Mesh installer 2019-03-26 10:14:44 +02:00
Stefan Prodan
f3fd515521 Merge pull request #121 from weaveworks/stats
Fix canary status prom metric
2019-03-26 09:58:54 +02:00
stefanprodan
9db5dd0d7f Update changelog 2019-03-26 09:58:40 +02:00
stefanprodan
d07925d79d Fix canary status prom metrics 2019-03-25 17:26:22 +02:00
Stefan Prodan
662d0f31ff Merge pull request #119 from weaveworks/appmesh-ref
App Mesh docs
2019-03-25 15:13:35 +02:00
stefanprodan
2c5ad0bf8f Disable App Mesh ingress for load tester 2019-03-25 15:00:57 +02:00
stefanprodan
0ea76b986a Prevent the CRD from being removed by Helm 2019-03-25 15:00:23 +02:00
stefanprodan
3c4253c336 Docs fixes 2019-03-25 14:59:55 +02:00
stefanprodan
77ba28e91c Use App Mesh install script 2019-03-25 09:41:56 +02:00
stefanprodan
6399e7586c Update overview diagram 2019-03-24 15:10:13 +02:00
Stefan Prodan
1caa62adc8 Merge pull request #118 from weaveworks/appmesh
App Mesh refactoring + docs
2019-03-24 13:53:40 +02:00
stefanprodan
8fa558f124 Add intro do App Mesh tutorial 2019-03-24 13:38:00 +02:00
stefanprodan
191228633b Add App Mesh backend example 2019-03-24 13:11:03 +02:00
stefanprodan
8a981f935a Add App Mesh GitOps diagram 2019-03-24 12:26:35 +02:00
stefanprodan
a8ea9adbcc Add the Slack notifications to App Mesh tutorial 2019-03-23 17:27:53 +02:00
stefanprodan
685d94c44b Add Grafana screen to App Mesh docs 2019-03-23 16:26:24 +02:00
stefanprodan
153ed1b044 Add the App Mesh ingress to docs 2019-03-23 16:09:30 +02:00
stefanprodan
7788f3a1ba Add virtual node for App Mesh ingress 2019-03-23 15:56:52 +02:00
stefanprodan
cd99225f9b Add App Mesh canary deployments tutorial 2019-03-23 15:41:04 +02:00
stefanprodan
33ba3b8d4a Add App Mesh canary demo definitions 2019-03-23 15:40:27 +02:00
stefanprodan
d222dd1069 Change GitHub raw URLs to Weaveworks org 2019-03-23 13:38:57 +02:00
stefanprodan
39fd3d46ba Add App Mesh ingress gateway deployment 2019-03-23 13:22:33 +02:00
stefanprodan
419c1804b6 Add App Mesh telemetry deployment 2019-03-23 13:21:52 +02:00
stefanprodan
ae0351ddad Exclude the namespace from AppMesh object names
ref: https://github.com/aws/aws-app-mesh-controller-for-k8s/issues/14
2019-03-23 11:25:39 +02:00
stefanprodan
941be15762 Fix typo in comments 2019-03-23 11:25:31 +02:00
stefanprodan
578ebcf6ed Use pod name filter in Envoy metrics query
Add support for Weave Cloud Prometheus agent
2019-03-23 11:20:51 +02:00
Stefan Prodan
27ab4b08f9 Merge pull request #112 from weaveworks/appmesh-grafana
Add AppMesh Grafana dashboard
2019-03-21 16:19:05 +02:00
stefanprodan
428b2208ba Add Flagger logo svg format (CNCF landscape) 2019-03-21 16:05:51 +02:00
Stefan Prodan
438c553d60 Merge pull request #113 from tanordheim/service-port-name
Allow setting name of ports in generated services
2019-03-21 15:38:21 +02:00
Trond Nordheim
90cb293182 Add GRPC canary analysis custom query example 2019-03-21 14:27:11 +01:00
Trond Nordheim
1f9f93ebe4 Add portName information to how it works-guide 2019-03-21 12:41:44 +01:00
Trond Nordheim
f5b97fbb74 Add support for naming generated service ports 2019-03-21 12:37:02 +01:00
stefanprodan
ce79244126 Add maintainers 2019-03-21 12:34:28 +02:00
stefanprodan
3af5d767d8 Add AppMesh dashboard screen 2019-03-21 12:19:04 +02:00
stefanprodan
3ce3efd2f2 Fix Grafana port forward command 2019-03-21 12:11:11 +02:00
stefanprodan
8108edea31 Add AppMesh Grafana dashboard 2019-03-21 12:06:50 +02:00
Stefan Prodan
5d80087ab3 Merge pull request #109 from weaveworks/appmesh-docs
Add EKS App Mesh install docs
2019-03-21 11:02:35 +02:00
stefanprodan
6593be584d Remove namespace from Prometheus URL 2019-03-21 10:52:21 +02:00
stefanprodan
a0f63f858f Update changelog and roadmap 2019-03-21 09:57:49 +02:00
stefanprodan
49914f3bd5 Add EKS App Mesh install link to readme 2019-03-20 23:46:14 +02:00
stefanprodan
71988a8b98 Add EKS App Mesh install guide 2019-03-20 23:40:09 +02:00
stefanprodan
d65be6ef58 Add AppMesh virtual node to load tester chart 2019-03-20 23:39:34 +02:00
Stefan Prodan
38c40d02e7 Merge pull request #108 from weaveworks/ww
Move to weaveworks org
2019-03-20 19:10:56 +02:00
stefanprodan
9e071b9d60 Move to weaveworks Quay 2019-03-20 18:52:27 +02:00
stefanprodan
b4ae060122 Move to weaveworks org 2019-03-20 18:26:04 +02:00
Stefan Prodan
436656e81b Merge pull request #107 from stefanprodan/appmesh
AWS App Mesh integration
2019-03-20 17:57:25 +02:00
stefanprodan
d7e111b7d4 Add mesh provider option to Helm chart 2019-03-20 12:59:35 +02:00
stefanprodan
4b6126dd1a Add Envoy HTTP success rate metric check 2019-03-19 15:52:26 +02:00
stefanprodan
89faa70196 Fix canary virtual node DNS discovery 2019-03-19 15:51:17 +02:00
stefanprodan
6ed9d4a1db Add AppMesh CRDs to Flagger's RBAC 2019-03-18 10:13:46 +02:00
stefanprodan
9d0e38c2e1 Disable primary readiness check in tests 2019-03-17 12:50:48 +02:00
stefanprodan
8b758fd616 Set primary min replicas 2019-03-17 12:30:11 +02:00
stefanprodan
14369d8be3 Fix virtual node backends 2019-03-17 12:29:04 +02:00
stefanprodan
7b4153113e Fix router tests 2019-03-17 11:14:47 +02:00
stefanprodan
7d340c5e61 Change mesh providers based on cmd flag 2019-03-17 10:52:52 +02:00
stefanprodan
337c94376d Add AppMesh routing tests 2019-03-17 10:29:40 +02:00
stefanprodan
0ef1d0b2f1 Implement AppMesh routing ops 2019-03-17 10:29:21 +02:00
stefanprodan
5cf67bd4e0 Add AppMesh router sync tests 2019-03-16 14:14:24 +02:00
stefanprodan
f22be17852 Add AppMesh router sync implementation
Sync virtual nodes and virtual services
2019-03-16 14:13:58 +02:00
stefanprodan
48e79d5dd4 Add mesh provider flag 2019-03-16 14:12:23 +02:00
stefanprodan
59f5a0654a Add AppMesh fields to Canary CRD 2019-03-16 14:11:24 +02:00
stefanprodan
6da2e11683 Add AppMesh CRDs and Kubernetes client 2019-03-16 14:10:09 +02:00
stefanprodan
802c087a4b Fix Istio Gateway certificate
fix: #102
2019-03-14 17:52:58 +02:00
Stefan Prodan
ed2048e9f3 Merge pull request #105 from stefanprodan/svc
Copy pod labels from canary to primary
2019-03-14 01:59:20 +02:00
stefanprodan
437b1d30c0 Copy labels from canary to primary 2019-03-14 01:49:07 +02:00
stefanprodan
ba1788cbc5 Change default ClusterIP to point to primary
- ensures that the routing works without a service mesh
2019-03-14 01:48:10 +02:00
Stefan Prodan
773094a20d Merge pull request #99 from stefanprodan/loadtester-v0.2.0
Release loadtester v0.2.0
2019-03-12 12:07:47 +02:00
stefanprodan
5aa39106a0 Update loadtester cmd for e2e testing 2019-03-12 11:58:16 +02:00
stefanprodan
a9167801ba Release loadtester v0.2.0 2019-03-12 11:55:31 +02:00
Stefan Prodan
62f4a6cb96 Merge pull request #90 from cloudang/ngrinder
Support delegation to external load testing tools
2019-03-12 11:47:30 +02:00
Stefan Prodan
ea2b41e96e Merge pull request #98 from peterj/master
Upgrade Alpine to 3.9
2019-03-12 10:18:16 +02:00
Alex Wong
d28ce650e9 fix typo 2019-03-12 15:40:20 +08:00
Alex Wong
1bfcdba499 update vendor 2019-03-12 15:00:55 +08:00
Alex Wong
e48faa9144 add docs for ngrinder load testing 2019-03-12 14:59:55 +08:00
Alex Wong
33fbe99561 remove logCmdOutput from docs and k8s resources definition 2019-03-12 14:35:39 +08:00
Alex Wong
989925b484 update canary spec example, cmd flag logCmdOutput moved here 2019-03-12 14:33:23 +08:00
Alex Wong
7dd66559e7 add metadata field 'cmd' 2019-03-12 14:31:30 +08:00
Alex Wong
2ef1c5608e remove logCmdOutput flag 2019-03-12 14:31:00 +08:00
Alex Wong
b5932e8905 support time duration literal 2019-03-12 14:29:50 +08:00
Peter Jausovec
37999d3250 Upgrade Alpine to 3.9. Fixes #89 2019-03-11 20:17:15 -07:00
Stefan Prodan
83985ae482 Merge pull request #93 from stefanprodan/release-v0.9.0
Release v0.9.0
2019-03-11 15:26:00 +02:00
Stefan Prodan
3adfcc837e Merge pull request #94 from stefanprodan/fix-abtest-routing
Fix A/B Testing HTTP URI match conditions
2019-03-11 15:15:42 +02:00
stefanprodan
c720fee3ab Target the canary header in the load test 2019-03-11 15:04:01 +02:00
stefanprodan
881387e522 Fix HTTP URI match conditions 2019-03-11 14:54:17 +02:00
stefanprodan
d9f3378e29 Add change log for v0.9.0 2019-03-11 14:03:55 +02:00
stefanprodan
ba87620225 Release v0.9.0 2019-03-11 13:57:10 +02:00
Stefan Prodan
1cd0c49872 Merge pull request #88 from stefanprodan/ab-testing
A/B testing - canary with session affinity
2019-03-11 13:55:06 +02:00
stefanprodan
12ac96deeb Document how to enable A/B testing 2019-03-11 12:58:33 +02:00
Alex Wong
17e6f35785 add gock.v1 dependency 2019-03-11 10:07:50 +08:00
Stefan Prodan
bd115633a3 Merge pull request #91 from huydinhle/update-analysis-interval
Sync job when canary's interval changes
fix #86
2019-03-09 19:58:34 +02:00
stefanprodan
86ea172380 Fix weight metric report 2019-03-08 23:28:45 +02:00
stefanprodan
d87bbbbc1e Add A/B testing tutorial 2019-03-08 21:26:52 +02:00
Huy Le
6196f69f4d Create New Job when Canary's Interval changes
- Currently whenever the Canary analysis interval changes, flagger does
not reflect this into canary's job.
- This change will make sure the canary analysis interval got updated whenever
the Canary object's interval changes
2019-03-08 10:27:34 -08:00
Alex Wong
be31bcf22f mocked test 2019-03-08 22:20:29 +08:00
Alex Wong
cba2135c69 add comments 2019-03-08 22:20:16 +08:00
Alex Wong
2e52573499 add gock dep 2019-03-08 22:20:02 +08:00
Alex Wong
b2ce1ed1fb test for ngrinder task 2019-03-08 21:30:26 +08:00
Alex Wong
77a485af74 poll ngrinder task status 2019-03-08 21:29:58 +08:00
stefanprodan
d8b847a973 Mention session affinity in docs 2019-03-08 15:05:44 +02:00
stefanprodan
e80a3d3232 Add A/B testing scheduling unit tests 2019-03-08 13:06:39 +02:00
stefanprodan
780ba82385 Log namespace restriction if one exists 2019-03-08 13:05:25 +02:00
stefanprodan
6ba69dce0a Add iterations field to CRD validation 2019-03-08 12:31:35 +02:00
stefanprodan
3c7a561db8 Add Istio routes A/B testing unit tests 2019-03-08 12:24:43 +02:00
stefanprodan
49c942bea0 Add A/B testing examples 2019-03-08 11:55:04 +02:00
stefanprodan
bf1ca293dc Implement fix routing for canary analysis
Allow A/B testing scenarios where instead of weighted routing the traffic is split between the primary and canary based on HTTP headers or cookies.
2019-03-08 11:54:41 +02:00
stefanprodan
62b906d30b Add canary HTTP match conditions and iterations 2019-03-08 11:49:32 +02:00
Alex Wong
65bf048189 add ngrinder support 2019-03-08 15:50:44 +08:00
Alex Wong
a498ed8200 move original cmd tester to standalone source 2019-03-08 15:50:26 +08:00
Alex Wong
9f12bbcd98 refactoring loadtester to support external testing platform 2019-03-08 15:49:35 +08:00
Stefan Prodan
fcd520787d Merge pull request #84 from stefanprodan/release-v0.8.0
Release v0.8.0
2019-03-06 21:30:09 +02:00
stefanprodan
e2417e4e40 Skip e2e tests for release branches 2019-03-06 21:21:48 +02:00
stefanprodan
70a2cbf1c6 Add change log for v0.8.0 2019-03-06 21:17:37 +02:00
stefanprodan
fa0c6af6aa Release v0.8.0 2019-03-06 21:17:13 +02:00
Stefan Prodan
4f1abd0c8d Merge pull request #83 from stefanprodan/cors-policy
Add CORS policy support
2019-03-06 20:31:37 +02:00
stefanprodan
41e839aa36 Fix virtual service example 2019-03-06 15:56:20 +02:00
stefanprodan
2fd1593ad2 Use service headers to set Envoy timeout 2019-03-06 15:38:14 +02:00
stefanprodan
27b601c5aa Add CORS policy example 2019-03-06 15:37:28 +02:00
stefanprodan
5fc69134e3 Add CORS policy test 2019-03-06 15:34:51 +02:00
stefanprodan
9adc0698bb Add CORS policy to Istio router 2019-03-06 15:34:36 +02:00
stefanprodan
119c2ff464 Add CORS policy to Canary CRD 2019-03-06 15:33:53 +02:00
Stefan Prodan
f3a4201c7d Merge pull request #82 from stefanprodan/headers-ops
Add support for HTTP request header manipulation rules
2019-03-06 14:58:05 +02:00
stefanprodan
8b6aa73df0 Fix request header test 2019-03-06 13:51:04 +02:00
stefanprodan
1d4dfb0883 Add request header add test 2019-03-06 13:46:19 +02:00
stefanprodan
eab7f126a6 Use request.add for header append operation 2019-03-06 13:45:46 +02:00
stefanprodan
fe7547d83e Update Envoy headers example 2019-03-06 12:42:34 +02:00
stefanprodan
7d0df82861 Add header manipulation rules to Canary CRD 2019-03-06 12:41:53 +02:00
stefanprodan
7f0cd27591 Add Header manipulation rules to Istio Virtual Service 2019-03-06 12:17:41 +02:00
Stefan Prodan
e094c2ae14 Merge pull request #80 from stefanprodan/istio
Add Istio k8s client
2019-03-06 11:55:27 +02:00
Stefan Prodan
a5d438257f Merge pull request #78 from huydinhle/namespace-watcher
Add namespace flag
2019-03-06 11:10:17 +02:00
Huy Le
d8cb8f1064 Added Namespace Flag for Flagger
- introduce the namespace flag for flagger to watch a single namespace
for Canary Objects
2019-03-05 20:57:00 -08:00
stefanprodan
a8d8bb2d6f Fix go fmt 2019-03-06 01:54:31 +02:00
stefanprodan
a76ea5917c Remove knative pkg
CORS and RetryOn are missing from the knative pkg.
Until Istio has an official k8s client, we'll maintain our own.
2019-03-06 01:47:13 +02:00
stefanprodan
b0b6198ec8 Add Istio virtual service and signal packages 2019-03-06 01:43:09 +02:00
Stefan Prodan
eda97f35d2 Merge pull request #73 from huydinhle/fined-grained-rbac
Fine-grained RBAC
2019-03-06 00:06:40 +02:00
Huy Le
2b6507d35a fine-grained rbac for flagger helm 2019-03-05 11:29:34 -08:00
stefanprodan
f7c4d5aa0b Disable PR comments when coverage doesn't change 2019-03-05 16:25:30 +02:00
Stefan Prodan
74f07cffa6 Merge pull request #72 from stefanprodan/router
Refactor routing management
2019-03-05 12:28:11 +02:00
Stefan Prodan
79c8ff0af8 Merge pull request #74 from cloudang/options
Command line options for easier debugging
2019-03-05 12:07:03 +02:00
stefanprodan
ac544eea4b Extend test coverage to all packages 2019-03-05 11:59:40 +02:00
Alex Wong
231a32331b move flags to main packages 2019-03-05 17:48:55 +08:00
Alex Wong
104e8ef050 Add options for customizing threadiness, logger encoding, and global logger level 2019-03-05 14:30:23 +08:00
Alex Wong
296015faff update .gitignore 2019-03-05 12:15:27 +08:00
stefanprodan
9a9964c968 Add ClusterIP host to virtual service 2019-03-05 02:27:56 +02:00
stefanprodan
0d05d86e32 Add Istio routing tests 2019-03-05 02:18:07 +02:00
stefanprodan
9680ca98f2 Rename service router to Kubernetes router 2019-03-05 02:12:52 +02:00
stefanprodan
42b850ca52 Replace controller routing management with router pkg 2019-03-05 02:04:55 +02:00
stefanprodan
3f5c22d863 Extract routing to dedicated package
- split routing management into Kubernetes service router and Istio Virtual service router
2019-03-05 02:02:58 +02:00
Stefan Prodan
535a92e871 Merge pull request #70 from stefanprodan/append-headers
Allow headers to be appended to HTTP requests
2019-03-04 10:39:43 +02:00
stefanprodan
3411a6a981 Add delay Envoy shutdown tip to docs 2019-03-03 14:03:34 +02:00
stefanprodan
b5adee271c Add zero downtime deployments tutorial 2019-03-03 13:24:15 +02:00
stefanprodan
e2abcd1323 Add append headers PR to changelog 2019-03-03 10:33:08 +02:00
Stefan Prodan
25fbe7ecb6 Merge pull request #71 from huydinhle/namepace-typo
Fixed namepace typo in the repo
2019-03-03 10:29:29 +02:00
Huy Le
6befee79c2 Fixed namepace typo in the repo 2019-03-02 13:49:42 -08:00
stefanprodan
f09c5a60f1 Add Envoy headers to e2e tests 2019-03-02 14:26:17 +02:00
stefanprodan
52e89ff509 Add Envoy timeout and retry policy to docs 2019-03-02 13:48:19 +02:00
stefanprodan
35e20406ef Append HTTP headers when configuring routing 2019-03-02 13:35:36 +02:00
stefanprodan
c6e96ff1bb Add append headers field to Canary CRD 2019-03-02 13:33:03 +02:00
Stefan Prodan
793ab524b0 Merge pull request #68 from stefanprodan/fix-docs
Add Getting Help section to readme
2019-03-02 10:36:40 +02:00
stefanprodan
5a479d0187 Add Weaveworks Slack links 2019-03-02 10:26:54 +02:00
stefanprodan
a23e4f1d2a Add timeout and reties example to docs 2019-03-02 10:26:34 +02:00
Stefan Prodan
bd35a3f61c Merge pull request #66 from stefanprodan/fix-mesh
Avoid mesh gateway duplicates
2019-03-02 01:27:00 +02:00
stefanprodan
197e987d5f Avoid mesh gateway duplicates 2019-03-01 13:09:27 +02:00
stefanprodan
7f29beb639 Don't run e2e tests for docs branches 2019-02-28 18:55:58 +02:00
556 changed files with 81751 additions and 24547 deletions
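A note on the A/B-testing commits above (#88, #94): instead of shifting traffic by weight, requests matching an HTTP header or cookie are sent to the canary while everything else stays on the primary. The diffs below show the Canary match conditions but not the Istio object Flagger generates from them, so here is a rough, hypothetical sketch of what such a VirtualService could look like, assuming Flagger's `-primary`/`-canary` service naming and reusing the cookie regex from the `abtest` example further down; it is an illustration, not output copied from the repository.

```yaml
# Hypothetical illustration only: an Istio VirtualService of the shape the
# A/B-testing commits (#88, #94) describe. Flagger's actual generated object may differ.
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: abtest
  namespace: test
spec:
  hosts:
    - abtest
  http:
    # requests matching the cookie are routed to the canary
    - match:
        - headers:
            cookie:
              regex: "^(.*?;)?(type=insider)(;.*)?$"
      route:
        - destination:
            host: abtest-canary
    # everything else stays on the primary
    - route:
        - destination:
            host: abtest-primary
```

During the analysis iterations the matched traffic stays on the canary, and promotion or rollback is driven by the metric checks, as described in the 0.9.0 changelog entry below.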


@@ -13,4 +13,10 @@ workflows:
version: 2
build-and-test:
jobs:
- e2e-testing
- e2e-testing:
filters:
branches:
ignore:
- gh-pages
- /docs-.*/
- /release-.*/


@@ -6,3 +6,6 @@ coverage:
threshold: 50
base: auto
patch: off
comment:
require_changes: yes

.github/CODEOWNERS (vendored, new file) — 1 line changed

@@ -0,0 +1 @@
* @stefanprodan

.gitignore (vendored) — 3 lines changed

@@ -13,4 +13,5 @@
.DS_Store
bin/
artifacts/gcloud/
artifacts/gcloud/
.idea


@@ -1,7 +1,7 @@
builds:
- main: ./cmd/flagger
binary: flagger
ldflags: -s -w -X github.com/stefanprodan/flagger/pkg/version.REVISION={{.Commit}}
ldflags: -s -w -X github.com/weaveworks/flagger/pkg/version.REVISION={{.Commit}}
goos:
- linux
goarch:


@@ -2,7 +2,7 @@ sudo: required
language: go
go:
- 1.11.x
- 1.12.x
services:
- docker
@@ -21,7 +21,7 @@ script:
- set -e
- make test-fmt
- make test-codegen
- go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
- go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
- make build
after_success:
@@ -29,16 +29,16 @@ after_success:
echo "PR build, skipping image push";
else
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
docker tag weaveworks/flagger:latest quay.io/weaveworks/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
docker push quay.io/weaveworks/flagger:${BRANCH_COMMIT};
fi
- if [ -z "$TRAVIS_TAG" ]; then
echo "Not a release, skipping image push";
else
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
docker tag weaveworks/flagger:latest quay.io/weaveworks/flagger:${TRAVIS_TAG};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
docker push quay.io/weaveworks/flagger:$TRAVIS_TAG;
fi
- bash <(curl -s https://codecov.io/bash)
- rm coverage.txt


@@ -2,14 +2,63 @@
All notable changes to this project are documented in this file.
## 0.10.0 (2019-03-27)
Adds support for App Mesh
#### Features
- AWS App Mesh integration
[#107](https://github.com/weaveworks/flagger/pull/107)
[#123](https://github.com/weaveworks/flagger/pull/123)
#### Improvements
- Reconcile Kubernetes ClusterIP services [#122](https://github.com/weaveworks/flagger/pull/122)
#### Fixes
- Preserve pod labels on canary promotion [#105](https://github.com/weaveworks/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/weaveworks/flagger/pull/121)
## 0.9.0 (2019-03-11)
Allows A/B testing scenarios where instead of weighted routing, the traffic is split between the
primary and canary based on HTTP headers or cookies.
#### Features
- A/B testing - canary with session affinity [#88](https://github.com/weaveworks/flagger/pull/88)
#### Fixes
- Update the analysis interval when the custom resource changes [#91](https://github.com/weaveworks/flagger/pull/91)
## 0.8.0 (2019-03-06)
Adds support for CORS policy and HTTP request headers manipulation
#### Features
- CORS policy support [#83](https://github.com/weaveworks/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/weaveworks/flagger/pull/82)
#### Improvements
- Refactor the routing management
[#72](https://github.com/weaveworks/flagger/pull/72)
[#80](https://github.com/weaveworks/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/weaveworks/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/weaveworks/flagger/pull/78)
## 0.7.0 (2019-02-28)
Adds support for custom metric checks, HTTP timeouts and HTTP retries
#### Features
- Allow custom promql queries in the canary analysis spec [##60](https://github.com/stefanprodan/flagger/pull/#60)
- Add HTTP timeout and retries to canary service spec [##62](https://github.com/stefanprodan/flagger/pull/#62)
- Allow custom promql queries in the canary analysis spec [#60](https://github.com/weaveworks/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/weaveworks/flagger/pull/62)
## 0.6.0 (2019-02-25)
@@ -19,15 +68,15 @@ to be customized in the service spec of the canary custom resource.
#### Features
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/stefanprodan/flagger/pull/55)
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/weaveworks/flagger/pull/55)
- Update virtual service when the canary service spec changes
[#54](https://github.com/stefanprodan/flagger/pull/54)
[#51](https://github.com/stefanprodan/flagger/pull/51)
[#54](https://github.com/weaveworks/flagger/pull/54)
[#51](https://github.com/weaveworks/flagger/pull/51)
#### Improvements
- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
[#53](https://github.com/stefanprodan/flagger/pull/53)
[#53](https://github.com/weaveworks/flagger/pull/53)
## 0.5.1 (2019-02-14)
@@ -35,15 +84,15 @@ Allows skipping the analysis phase to ship changes directly to production
#### Features
- Add option to skip the canary analysis [#46](https://github.com/stefanprodan/flagger/pull/46)
- Add option to skip the canary analysis [#46](https://github.com/weaveworks/flagger/pull/46)
#### Fixes
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/stefanprodan/flagger/pull/43)
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/weaveworks/flagger/pull/43)
## 0.5.0 (2019-01-30)
Track changes in ConfigMaps and Secrets [#37](https://github.com/stefanprodan/flagger/pull/37)
Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flagger/pull/37)
#### Features
@@ -59,7 +108,7 @@ Track changes in ConfigMaps and Secrets [#37](https://github.com/stefanprodan/fl
## 0.4.1 (2019-01-24)
Load testing webhook [#35](https://github.com/stefanprodan/flagger/pull/35)
Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)
#### Features
@@ -73,7 +122,7 @@ Load testing webhook [#35](https://github.com/stefanprodan/flagger/pull/35)
## 0.4.0 (2019-01-18)
Restart canary analysis if revision changes [#31](https://github.com/stefanprodan/flagger/pull/31)
Restart canary analysis if revision changes [#31](https://github.com/weaveworks/flagger/pull/31)
#### Breaking changes
@@ -94,7 +143,7 @@ Restart canary analysis if revision changes [#31](https://github.com/stefanproda
## 0.3.0 (2019-01-11)
Configurable canary analysis duration [#20](https://github.com/stefanprodan/flagger/pull/20)
Configurable canary analysis duration [#20](https://github.com/weaveworks/flagger/pull/20)
#### Breaking changes
@@ -109,7 +158,7 @@ Configurable canary analysis duration [#20](https://github.com/stefanprodan/flag
## 0.2.0 (2019-01-04)
Webhooks [#18](https://github.com/stefanprodan/flagger/pull/18)
Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)
#### Features
@@ -120,7 +169,7 @@ Webhooks [#18](https://github.com/stefanprodan/flagger/pull/18)
## 0.1.2 (2018-12-06)
Improve Slack notifications [#14](https://github.com/stefanprodan/flagger/pull/14)
Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)
#### Features
@@ -129,7 +178,7 @@ Improve Slack notifications [#14](https://github.com/stefanprodan/flagger/pull/1
## 0.1.1 (2018-11-28)
Canary progress deadline [#10](https://github.com/stefanprodan/flagger/pull/10)
Canary progress deadline [#10](https://github.com/weaveworks/flagger/pull/10)
#### Features


@@ -1,17 +1,17 @@
FROM golang:1.11
FROM golang:1.12
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
RUN mkdir -p /go/src/github.com/weaveworks/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
WORKDIR /go/src/github.com/weaveworks/flagger
COPY . .
RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
-X github.com/stefanprodan/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-X github.com/weaveworks/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-a -installsuffix cgo -o flagger ./cmd/flagger/*
FROM alpine:3.8
FROM alpine:3.9
RUN addgroup -S flagger \
&& adduser -S -g flagger flagger \
@@ -19,7 +19,7 @@ RUN addgroup -S flagger \
WORKDIR /home/flagger
COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .
COPY --from=0 /go/src/github.com/weaveworks/flagger/flagger .
RUN chown -R flagger:flagger ./


@@ -1,4 +1,4 @@
FROM golang:1.11 AS hey-builder
FROM golang:1.12 AS hey-builder
RUN mkdir -p /go/src/github.com/rakyll/hey/
@@ -16,9 +16,9 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
FROM golang:1.11 AS builder
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
RUN mkdir -p /go/src/github.com/weaveworks/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
WORKDIR /go/src/github.com/weaveworks/flagger
COPY . .
@@ -26,7 +26,7 @@ RUN go test -race ./pkg/loadtester/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*
FROM alpine:3.8
FROM alpine:3.9
RUN addgroup -S app \
&& adduser -S -g app app \
@@ -35,7 +35,7 @@ RUN addgroup -S app \
WORKDIR /home/app
COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .
COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester .
RUN chown -R app:app ./

Gopkg.lock (generated) — 160 lines changed

@@ -2,12 +2,12 @@
[[projects]]
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663"
name = "cloud.google.com/go"
packages = ["compute/metadata"]
pruneopts = "NUT"
revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
version = "v0.28.0"
revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
version = "v0.36.0"
[[projects]]
branch = "master"
@@ -34,15 +34,15 @@
version = "v1.0.0"
[[projects]]
digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "NUT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
version = "v1.2.1"
[[projects]]
branch = "master"
@@ -55,14 +55,14 @@
[[projects]]
branch = "master"
digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "NUT"
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
[[projects]]
digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
name = "github.com/golang/protobuf"
packages = [
"proto",
@@ -72,8 +72,8 @@
"ptypes/timestamp",
]
pruneopts = "NUT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f"
version = "v1.3.0"
[[projects]]
branch = "master"
@@ -119,33 +119,41 @@
[[projects]]
branch = "master"
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
digest = "1:a86d65bc23eea505cd9139178e4d889733928fe165c7a008f41eaab039edf9df"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "NUT"
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
[[projects]]
digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
digest = "1:7313d6b9095eb86581402557bbf3871620cf82adf41853c5b9bee04b894290c7"
name = "github.com/h2non/parth"
packages = ["."]
pruneopts = "NUT"
revision = "b4df798d65426f8c8ab5ca5f9987aec5575d26c9"
version = "v2.0.1"
[[projects]]
digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "NUT"
revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
version = "v0.5.0"
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
version = "v0.5.1"
[[projects]]
digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "NUT"
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
version = "v0.3.6"
revision = "7c29201646fa3de8506f701213473dd407f19646"
version = "v0.3.7"
[[projects]]
branch = "master"
@@ -162,27 +170,6 @@
pruneopts = "NUT"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
digest = "1:05ddd9088c0cfb8eaa3adf3626977caa6d96b3959a3bd8c91fef932fd1696c34"
name = "github.com/knative/pkg"
packages = [
"apis/istio",
"apis/istio/authentication",
"apis/istio/authentication/v1alpha1",
"apis/istio/common/v1alpha1",
"apis/istio/v1alpha3",
"client/clientset/versioned",
"client/clientset/versioned/fake",
"client/clientset/versioned/scheme",
"client/clientset/versioned/typed/authentication/v1alpha1",
"client/clientset/versioned/typed/authentication/v1alpha1/fake",
"client/clientset/versioned/typed/istio/v1alpha3",
"client/clientset/versioned/typed/istio/v1alpha3/fake",
"signals",
]
pruneopts = "NUT"
revision = "f9612ef73847258e381e749c4f45b0f5e03b66e9"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
name = "github.com/matttproud/golang_protobuf_extensions"
@@ -240,11 +227,10 @@
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "NUT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
[[projects]]
branch = "master"
digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
digest = "1:4e776079b966091d3e6e12ed2aaf728bea5cd1175ef88bb654e03adbf5d4f5d3"
name = "github.com/prometheus/common"
packages = [
"expfmt",
@@ -252,28 +238,30 @@
"model",
]
pruneopts = "NUT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
digest = "1:0a2e604afa3cbf53a1ddade2f240ee8472eded98856dd8c7cfbfea392ddbbfc7"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"iostats",
"nfs",
"xfs",
]
pruneopts = "NUT"
revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"
revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02"
[[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
@@ -308,15 +296,15 @@
[[projects]]
branch = "master"
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
revision = "8dd112bcdc25174059e45e07517d9fc663123347"
[[projects]]
branch = "master"
digest = "1:1400b8e87c2c9bd486ea1a13155f59f8f02d385761206df05c0b7db007a53b2c"
digest = "1:e3477b53a5c2fb71a7c9688e9b3d58be702807a5a88def8b9a327259d46e4979"
name = "golang.org/x/net"
packages = [
"context",
@@ -327,11 +315,11 @@
"idna",
]
pruneopts = "NUT"
revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
revision = "16b79f2e4e95ea23b2bf9903c9809ff7b013ce85"
[[projects]]
branch = "master"
digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
digest = "1:17ee74a4d9b6078611784b873cdbfe91892d2c73052c430724e66fcc015b6c7b"
name = "golang.org/x/oauth2"
packages = [
".",
@@ -341,18 +329,18 @@
"jwt",
]
pruneopts = "NUT"
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3"
[[projects]]
branch = "master"
digest = "1:44261e94b6095310a2df925fd68632d399a00eb153b52566a7b3697f7c70638c"
digest = "1:a0d91ab4d23badd4e64e115c6e6ba7dd56bd3cde5d287845822fb2599ac10236"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "1561086e645b2809fb9f8a1e2a38160bf8d53bf4"
revision = "30e92a19ae4a77dde818b8c3d41d51e4850cba12"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -379,26 +367,35 @@
[[projects]]
branch = "master"
digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
[[projects]]
branch = "master"
digest = "1:45751dc3302c90ea55913674261b2d74286b05cdd8e3ae9606e02e4e77f4353f"
digest = "1:e46d8e20161401a9cf8765dfa428494a3492a0b56fe114156b7da792bf41ba78"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"go/gcexportdata",
"go/internal/cgo",
"go/internal/gcimporter",
"go/internal/packagesdriver",
"go/packages",
"go/types/typeutil",
"imports",
"internal/fastwalk",
"internal/gopathwalk",
"internal/module",
"internal/semver",
]
pruneopts = "NUT"
revision = "90fa682c2a6e6a37b3a1364ce2fe1d5e41af9d6d"
revision = "f8c04913dfb7b2339a756441456bdbe0af6eb508"
[[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
digest = "1:d395d49d784dd3a11938a3e85091b6570664aa90ff2767a626565c6c130fa7e9"
name = "google.golang.org/appengine"
packages = [
".",
@@ -413,8 +410,16 @@
"urlfetch",
]
pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
version = "v1.2.0"
revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
version = "v1.4.0"
[[projects]]
digest = "1:fe9eb931d7b59027c4a3467f7edc16cc8552dac5328039bec05045143c18e1ce"
name = "gopkg.in/h2non/gock.v1"
packages = ["."]
pruneopts = "NUT"
revision = "ba88c4862a27596539531ce469478a91bc5a0511"
version = "v1.0.14"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
@@ -425,12 +430,12 @@
version = "v0.9.1"
[[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
@@ -653,7 +658,7 @@
[[projects]]
branch = "master"
digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3"
digest = "1:61024ed77a53ac618effed55043bf6a9afbdeb64136bd6a5b0c992d4c0363766"
name = "k8s.io/gengo"
packages = [
"args",
@@ -666,15 +671,23 @@
"types",
]
pruneopts = "NUT"
revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991"
revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
[[projects]]
digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "NUT"
revision = "e3762e86a74c878ffed47484592986685639c2cd"
revision = "b3a7cee44a305be0a69e1b9ac03018307287e1b0"
[solve-meta]
analyzer-name = "dep"
@@ -683,14 +696,11 @@
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/istio/glog",
"github.com/knative/pkg/apis/istio/v1alpha3",
"github.com/knative/pkg/client/clientset/versioned",
"github.com/knative/pkg/client/clientset/versioned/fake",
"github.com/knative/pkg/signals",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"gopkg.in/h2non/gock.v1",
"k8s.io/api/apps/v1",
"k8s.io/api/autoscaling/v1",
"k8s.io/api/autoscaling/v2beta1",


@@ -11,6 +11,10 @@ required = [
name = "go.uber.org/zap"
version = "v1.9.1"
[[constraint]]
name = "gopkg.in/h2non/gock.v1"
version = "v1.0.14"
[[override]]
name = "gopkg.in/yaml.v2"
version = "v2.2.1"
@@ -45,10 +49,6 @@ required = [
name = "github.com/google/go-cmp"
version = "v0.2.0"
[[constraint]]
name = "github.com/knative/pkg"
revision = "f9612ef73847258e381e749c4f45b0f5e03b66e9"
[[override]]
name = "github.com/golang/glog"
source = "github.com/istio/glog"

MAINTAINERS (new file) — 5 lines changed

@@ -0,0 +1,5 @@
The maintainers are generally available in Slack at
https://weave-community.slack.com/messages/flagger/ (obtain an invitation
at https://slack.weave.works/).
Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)


@@ -11,12 +11,18 @@ run:
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-appmesh:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090 \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
build:
docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile
docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile
push:
docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
docker push quay.io/stefanprodan/flagger:$(VERSION)
docker tag weaveworks/flagger:$(TAG) quay.io/weaveworks/flagger:$(VERSION)
docker push quay.io/weaveworks/flagger:$(VERSION)
fmt:
gofmt -l -s -w $(SOURCE_DIRS)
@@ -33,7 +39,7 @@ test: test-fmt test-codegen
helm-package:
cd charts/ && helm package ./*
mv charts/*.tgz docs/
helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
helm-up:
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
@@ -82,5 +88,5 @@ reset-test:
kubectl apply -f ./artifacts/canaries
loadtester-push:
docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)
docker build -t quay.io/weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push quay.io/weaveworks/flagger-loadtester:$(LT_VERSION)


@@ -1,13 +1,13 @@
# flagger
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![codecov](https://codecov.io/gh/stefanprodan/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
[![build](https://travis-ci.org/weaveworks/flagger.svg?branch=master)](https://travis-ci.org/weaveworks/flagger)
[![report](https://goreportcard.com/badge/github.com/weaveworks/flagger)](https://goreportcard.com/report/github.com/weaveworks/flagger)
[![codecov](https://codecov.io/gh/weaveworks/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/weaveworks/flagger)
[![license](https://img.shields.io/github/license/weaveworks/flagger.svg)](https://github.com/weaveworks/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/weaveworks/flagger/all.svg)](https://github.com/weaveworks/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
using Istio or App Mesh routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
load tests or any other custom validation.
@@ -15,7 +15,7 @@ Flagger implements a control loop that gradually shifts traffic to the canary wh
indicators like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
![flagger-overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
### Documentation
@@ -23,10 +23,11 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* Install
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
* [Flagger install on GKE](https://docs.flagger.app/install/flagger-install-on-google-cloud)
* [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
* [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
* How it works
* [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
* [Virtual Service](https://docs.flagger.app/how-it-works#virtual-service)
* [Routing](https://docs.flagger.app/how-it-works#istio-routing)
* [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
* [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
* [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
@@ -34,35 +35,18 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* Usage
* [Canary promotions and rollbacks](https://docs.flagger.app/usage/progressive-delivery)
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
* Tutorials
* [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)
### Install
Before installing Flagger make sure you have Istio setup up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Canary CRD
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio virtual services).
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services).
These objects expose the application on the mesh and drive the canary analysis and promotion.
Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
@@ -106,11 +90,17 @@ spec:
# HTTP rewrite (optional)
rewrite:
uri: /
# timeout for HTTP requests (optional)
timeout: 5s
# retry policy when a HTTP request fails (optional)
retries:
attempts: 3
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs
@@ -161,8 +151,7 @@ For more details on how the canary analysis and promotion works please [read the
### Roadmap
* Add A/B testing capabilities using fixed routing based on HTTP headers and cookies match conditions
* Integrate with other service mesh technologies like AWS AppMesh and Linkerd v2
* Integrate with other service mesh technologies like Linkerd v2, Super Gloo or Consul Mesh
* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two
### Contributing
@@ -175,3 +164,16 @@ When submitting bug reports please include as much details as possible:
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)
### Getting Help
If you have any questions about Flagger and progressive delivery:
* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [Weave community slack](https://slack.weave.works/)
and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
hands-on training and meetups in your area.
* File an [issue](https://github.com/weaveworks/flagger/issues/new).
Your feedback is always welcome!


@@ -0,0 +1,61 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
# the maximum time in seconds for the canary deployment
# to make progress before it is rollback (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- abtest.istio.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# total number of iterations
iterations: 10
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome)(?=.*\bSafari\b).*$"
- headers:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"


@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: abtest
namespace: test
labels:
app: abtest
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: abtest
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: abtest
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi


@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: abtest
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99


@@ -0,0 +1,50 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rollback (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# App Mesh reference
meshName: global
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: envoy_cluster_upstream_rq
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"


@@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi


@@ -0,0 +1,6 @@
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
name: global
spec:
serviceDiscoveryType: dns


@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99


@@ -0,0 +1,177 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: ingress-config
namespace: test
labels:
app: ingress
data:
envoy.yaml: |
static_resources:
listeners:
- address:
socket_address:
address: 0.0.0.0
port_value: 80
filter_chains:
- filters:
- name: envoy.http_connection_manager
config:
access_log:
- name: envoy.file_access_log
config:
path: /dev/stdout
codec_type: auto
stat_prefix: ingress_http
http_filters:
- name: envoy.router
config: {}
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ["*"]
routes:
- match:
prefix: "/"
route:
cluster: podinfo
host_rewrite: podinfo.test
timeout: 15s
retry_policy:
retry_on: "gateway-error,connect-failure,refused-stream"
num_retries: 10
per_try_timeout: 5s
clusters:
- name: podinfo
connect_timeout: 0.30s
type: strict_dns
lb_policy: round_robin
http2_protocol_options: {}
hosts:
- socket_address:
address: podinfo.test
port_value: 9898
admin:
access_log_path: /dev/null
address:
socket_address:
address: 0.0.0.0
port_value: 9999
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ingress
namespace: test
labels:
app: ingress
spec:
replicas: 1
selector:
matchLabels:
app: ingress
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
app: ingress
annotations:
prometheus.io/path: "/stats/prometheus"
prometheus.io/port: "9999"
prometheus.io/scrape: "true"
# dummy port to exclude ingress from mesh traffic
# only egress should go over the mesh
appmesh.k8s.aws/ports: "444"
spec:
terminationGracePeriodSeconds: 30
containers:
- name: ingress
image: "envoyproxy/envoy-alpine:d920944aed67425f91fc203774aebce9609e5d9a"
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
command:
- /usr/bin/dumb-init
- --
args:
- /usr/local/bin/envoy
- --base-id 30
- --v2-config-only
- -l
- $loglevel
- -c
- /config/envoy.yaml
ports:
- name: admin
containerPort: 9999
protocol: TCP
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
initialDelaySeconds: 5
tcpSocket:
port: admin
readinessProbe:
initialDelaySeconds: 5
tcpSocket:
port: admin
resources:
requests:
cpu: 100m
memory: 64Mi
volumeMounts:
- name: config
mountPath: /config
volumes:
- name: config
configMap:
name: ingress-config
---
kind: Service
apiVersion: v1
metadata:
name: ingress
namespace: test
spec:
selector:
app: ingress
ports:
- protocol: TCP
name: http
port: 80
targetPort: 80
- protocol: TCP
name: https
port: 443
targetPort: 443
type: LoadBalancer
---
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
name: ingress
namespace: test
spec:
meshName: global
listeners:
- portMapping:
port: 80
protocol: http
serviceDiscovery:
dns:
hostName: ingress.test
backends:
- virtualService:
virtualServiceName: podinfo.test


@@ -26,15 +26,21 @@ spec:
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
# Istio virtual service HTTP match conditions (optional)
# HTTP match conditions (optional)
match:
- uri:
prefix: /
# Istio virtual service HTTP rewrite (optional)
# HTTP rewrite (optional)
rewrite:
uri: /
# for emergency cases when you want to ship changes
# in production without analysing the canary
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# promote the canary without analysing it (default false)
skipAnalysis: false
canaryAnalysis:
# schedule interval (default 60s)
@@ -65,4 +71,6 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
logCmdOutput: "true"

View File

@@ -0,0 +1,264 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
labels:
app: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- services
- endpoints
- pods
- nodes/proxy
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus
labels:
app: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: appmesh-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
---
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
data:
prometheus.yml: |-
global:
scrape_interval: 5s
scrape_configs:
# Scrape config for AppMesh Envoy sidecar
- job_name: 'appmesh-envoy'
metrics_path: /stats/prometheus
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_name]
action: keep
regex: '^envoy$'
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: ${1}:9901
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
# Exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [ cluster_name ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ tcp_prefix ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ listener_address ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_listener_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tls.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tcp_downstream.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_http_(stats|admin).*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
action: drop
# Scrape config for API servers
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- default
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https
# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# scrape config for pods
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- source_labels: [ __address__ ]
regex: '.*9901.*'
action: drop
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
annotations:
version: "appmesh-v1alpha1"
spec:
serviceAccountName: prometheus
containers:
- name: prometheus
image: "docker.io/prom/prometheus:v2.7.1"
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=6h'
- '--config.file=/etc/prometheus/prometheus.yml'
ports:
- containerPort: 9090
name: http
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
readinessProbe:
httpGet:
path: /-/ready
port: 9090
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: config-volume
mountPath: /etc/prometheus
volumes:
- name: config-volume
configMap:
name: prometheus
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: appmesh-system
labels:
name: prometheus
spec:
selector:
app: prometheus
ports:
- name: http
protocol: TCP
port: 9090

View File

@@ -13,11 +13,50 @@ metadata:
labels:
app: flagger
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ['*']
- nonResourceURLs: ['*']
verbs: ['*']
- apiGroups:
- ""
resources:
- events
- configmaps
- secrets
- services
verbs: ["*"]
- apiGroups:
- apps
resources:
- deployments
verbs: ["*"]
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- flagger.app
resources:
- canaries
- canaries/status
verbs: ["*"]
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- appmesh.k8s.aws
resources:
- meshes
- meshes/status
- virtualnodes
- virtualnodes/status
- virtualservices
- virtualservices/status
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -73,6 +73,10 @@ spec:
properties:
port:
type: number
portName:
type: string
meshName:
type: string
timeout:
type: string
skipAnalysis:
@@ -82,6 +86,8 @@ spec:
interval:
type: string
pattern: "^[0-9]+(m|s)"
iterations:
type: number
threshold:
type: number
maxWeight:

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.7.0
image: quay.io/weaveworks/flagger:0.10.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: quay.io/stefanprodan/flagger-loadtester:0.1.0
image: quay.io/stefanprodan/flagger-loadtester:0.2.0
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -27,7 +27,6 @@ spec:
- -port=8080
- -log-level=info
- -timeout=1h
- -log-cmd-output=true
livenessProbe:
exec:
command:

View File

@@ -4,3 +4,4 @@ metadata:
name: test
labels:
istio-injection: enabled
appmesh.k8s.aws/sidecarInjectorWebhook: enabled

View File

@@ -8,13 +8,17 @@ spec:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- app.istio.weavedx.com
- podinfo
http:
- match:
- headers:
user-agent:
regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
uri:
prefix: "/version/"
rewrite:
uri: /api/info
route:
- destination:
host: podinfo-primary
@@ -26,7 +30,12 @@ spec:
port:
number: 9898
weight: 100
- route:
- match:
- uri:
prefix: "/version/"
rewrite:
uri: /api/info
route:
- destination:
host: podinfo-primary
port:

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.2.0
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
@@ -67,9 +67,3 @@ spec:
requests:
cpu: 100m
memory: 16Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- emptyDir: {}
name: data

View File

@@ -1,10 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
name: podinfo-canary
namespace: test
labels:
app: podinfo
spec:
type: ClusterIP
selector:

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.1.1
image: quay.io/stefanprodan/podinfo:1.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
namespace: test
spec:
type: ClusterIP
selector:
app: podinfo-primary
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http

View File

@@ -1,14 +1,14 @@
apiVersion: v1
name: flagger
version: 0.7.0
appVersion: 0.7.0
version: 0.10.0
appVersion: 0.10.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
@@ -16,4 +16,5 @@ maintainers:
keywords:
- canary
- istio
- appmesh
- gitops

View File

@@ -1,6 +1,6 @@
# Flagger
[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.

View File

@@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
annotations:
helm.sh/resource-policy: keep
spec:
group: flagger.app
version: v1alpha3
@@ -74,6 +76,10 @@ spec:
properties:
port:
type: number
portName:
type: string
meshName:
type: string
timeout:
type: string
skipAnalysis:
@@ -83,6 +89,8 @@ spec:
interval:
type: string
pattern: "^[0-9]+(m|s)"
iterations:
type: number
threshold:
type: number
maxWeight:

View File

@@ -35,7 +35,13 @@ spec:
command:
- ./flagger
- -log-level=info
{{- if .Values.meshProvider }}
- -mesh-provider={{ .Values.meshProvider }}
{{- end }}
- -metrics-server={{ .Values.metricsServer }}
{{- if .Values.namespace }}
- -namespace={{ .Values.namespace }}
{{- end }}
{{- if .Values.slack.url }}
- -slack-url={{ .Values.slack.url }}
- -slack-user={{ .Values.slack.user }}

View File

@@ -9,11 +9,50 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ['*']
- nonResourceURLs: ['*']
verbs: ['*']
- apiGroups:
- ""
resources:
- events
- configmaps
- secrets
- services
verbs: ["*"]
- apiGroups:
- apps
resources:
- deployments
verbs: ["*"]
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- flagger.app
resources:
- canaries
- canaries/status
verbs: ["*"]
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- appmesh.k8s.aws
resources:
- meshes
- meshes/status
- virtualnodes
- virtualnodes/status
- virtualservices
- virtualservices/status
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -1,11 +1,17 @@
# Default values for flagger.
image:
repository: quay.io/stefanprodan/flagger
tag: 0.7.0
repository: quay.io/weaveworks/flagger
tag: 0.10.0
pullPolicy: IfNotPresent
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
metricsServer: "http://prometheus:9090"
# accepted values are istio or appmesh (defaults to istio)
meshProvider: ""
# single namespace restriction
namespace: ""
slack:
user: flagger

View File

@@ -1,12 +1,12 @@
apiVersion: v1
name: grafana
version: 1.0.0
version: 1.1.0
appVersion: 5.4.3
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan

View File

@@ -2,7 +2,7 @@
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
![flagger-grafana](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites

File diff suppressed because it is too large

View File

@@ -1583,11 +1583,7 @@
"list": [
{
"allValue": null,
"current": {
"selected": true,
"text": "test",
"value": "test"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1609,11 +1605,7 @@
},
{
"allValue": null,
"current": {
"selected": false,
"text": "backend-primary",
"value": "backend-primary"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1635,11 +1627,7 @@
},
{
"allValue": null,
"current": {
"selected": true,
"text": "backend",
"value": "backend"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1691,7 +1679,7 @@
]
},
"timezone": "",
"title": "Flagger canary",
"uid": "RdykD7tiz",
"title": "Istio Canary",
"uid": "flagger-istio",
"version": 3
}

View File

@@ -1,15 +1,7 @@
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
1. Run the port forward command:
kubectl -n {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }} 3000:80
2. Navigate to:
http://localhost:3000

View File

@@ -1,14 +1,14 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
version: 0.2.0
appVersion: 0.2.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
@@ -16,5 +16,6 @@ maintainers:
keywords:
- canary
- istio
- appmesh
- gitops
- load testing

View File

@@ -1,6 +1,6 @@
# Flagger load testing service
[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[Flagger's](https://github.com/weaveworks/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.
@@ -56,15 +56,15 @@ Parameter | Description | Default
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
`meshName` | AWS App Mesh name | `none`
`backends` | AWS App Mesh virtual services | `none`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install flagger/loadtester --name flagger-loadtester \
--set cmd.logOutput=false
helm install flagger/loadtester --name flagger-loadtester
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

View File

@@ -16,6 +16,8 @@ spec:
metadata:
labels:
app: {{ include "loadtester.name" . }}
annotations:
appmesh.k8s.aws/ports: "444"
spec:
containers:
- name: {{ .Chart.Name }}
@@ -29,7 +31,6 @@ spec:
- -port=8080
- -log-level={{ .Values.logLevel }}
- -timeout={{ .Values.cmd.timeout }}
- -log-cmd-output={{ .Values.cmd.logOutput }}
livenessProbe:
exec:
command:

View File

@@ -0,0 +1,27 @@
{{- if .Values.meshName }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
meshName: {{ .Values.meshName }}
listeners:
- portMapping:
port: 444
protocol: http
serviceDiscovery:
dns:
hostName: {{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}
{{- if .Values.backends }}
backends:
{{- range .Values.backends }}
- virtualService:
virtualServiceName: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,13 +1,12 @@
replicaCount: 1
image:
repository: quay.io/stefanprodan/flagger-loadtester
tag: 0.1.0
repository: quay.io/weaveworks/flagger-loadtester
tag: 0.2.0
pullPolicy: IfNotPresent
logLevel: info
cmd:
logOutput: true
timeout: 1h
nameOverride: ""
@@ -27,3 +26,9 @@ nodeSelector: {}
tolerations: []
affinity: {}
# App Mesh virtual node settings
meshName: ""
#backends:
# - app1.namespace
# - app2.namespace

View File

@@ -4,9 +4,9 @@ appVersion: 1.4.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://github.com/stefanprodan/flagger
home: https://github.com/weaveworks/flagger
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger

View File

@@ -2,23 +2,22 @@ package main
import (
"flag"
"log"
"time"
_ "github.com/istio/glog"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
"github.com/knative/pkg/signals"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/controller"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/stefanprodan/flagger/pkg/notifier"
"github.com/stefanprodan/flagger/pkg/server"
"github.com/stefanprodan/flagger/pkg/version"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logging"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"log"
"time"
)
var (
@@ -31,6 +30,11 @@ var (
slackURL string
slackUser string
slackChannel string
threadiness int
zapReplaceGlobals bool
zapEncoding string
namespace string
meshProvider string
)
func init() {
@@ -43,15 +47,24 @@ func init() {
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio or appmesh")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger(logLevel)
logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
if zapReplaceGlobals {
zap.ReplaceGlobals(logger.Desugar())
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
@@ -66,7 +79,7 @@ func main() {
logger.Fatalf("Error building kubernetes clientset: %v", err)
}
istioClient, err := istioclientset.NewForConfig(cfg)
meshClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building istio clientset: %v", err)
}
@@ -76,7 +89,8 @@ func main() {
logger.Fatalf("Error building example clientset: %s", err.Error())
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
@@ -87,6 +101,9 @@ func main() {
}
logger.Infof("Connected to Kubernetes API %s", ver)
if namespace != "" {
logger.Infof("Watching namespace %s", namespace)
}
ok, err := controller.CheckMetricsServer(metricsServer)
if ok {
@@ -110,13 +127,14 @@ func main() {
c := controller.NewController(
kubeClient,
istioClient,
meshClient,
flaggerClient,
canaryInformer,
controlLoopInterval,
metricsServer,
logger,
slack,
meshProvider,
)
flaggerInformerFactory.Start(stopCh)
@@ -132,7 +150,7 @@ func main() {
// start controller
go func(ctrl *controller.Controller) {
if err := ctrl.Run(2, stopCh); err != nil {
if err := ctrl.Run(threadiness, stopCh); err != nil {
logger.Fatalf("Error running controller: %v", err)
}
}(c)

View File

@@ -2,40 +2,47 @@ package main
import (
"flag"
"github.com/knative/pkg/signals"
"github.com/stefanprodan/flagger/pkg/loadtester"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/weaveworks/flagger/pkg/loadtester"
"github.com/weaveworks/flagger/pkg/logging"
"github.com/weaveworks/flagger/pkg/signals"
"go.uber.org/zap"
"log"
"time"
)
var VERSION = "0.1.0"
var VERSION = "0.2.0"
var (
logLevel string
port string
timeout time.Duration
logCmdOutput bool
logLevel string
port string
timeout time.Duration
zapReplaceGlobals bool
zapEncoding string
)
func init() {
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "9090", "Port to listen on.")
flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger(logLevel)
logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
if zapReplaceGlobals {
zap.ReplaceGlobals(logger.Desugar())
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)
taskRunner := loadtester.NewTaskRunner(logger, timeout)
go taskRunner.Start(100*time.Millisecond, stopCh)

Binary file not shown.

After

Width:  |  Height:  |  Size: 158 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 207 KiB

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 183 KiB

After

Width:  |  Height:  |  Size: 46 KiB

View File

@@ -1,11 +1,11 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
description: Flagger is a progressive delivery Kubernetes operator
---
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** or **App Mesh** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.
@@ -13,7 +13,7 @@ Flagger implements a control loop that gradually shifts traffic to the canary wh
indicators like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
![Flagger overview diagram](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,

View File

@@ -6,14 +6,18 @@
## Install
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
* [Flagger Install on Google Cloud](install/flagger-install-on-google-cloud.md)
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
## Usage
* [Canary Deployments](usage/progressive-delivery.md)
* [Istio Canary Deployments](usage/progressive-delivery.md)
* [Istio A/B Testing](usage/ab-testing.md)
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)
## Tutorials
* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)

View File

@@ -1,10 +1,10 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
[Flagger](https://github.com/weaveworks/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
@@ -33,25 +33,14 @@ spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-podinfo
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
# HTTP match conditions (optional)
match:
- uri:
prefix: /
# HTTP rewrite (optional)
rewrite:
uri: /
# timeout for HTTP requests (optional)
timeout: 5s
# retry policy when a HTTP request fails (optional)
retries:
attempts: 3
perTryTimeout: 3s
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs
@@ -109,9 +98,11 @@ spec:
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
### Virtual Service
### Istio routing
Flagger creates an Istio Virtual Service based on the Canary service spec.
Flagger creates an Istio Virtual Service based on the Canary service spec. The service configuration lets you expose
an app inside or outside the mesh.
You can also define HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.
@@ -122,22 +113,42 @@ kind: Canary
metadata:
name: frontend
namespace: test
spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-frontend
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- frontend.example.com
# Istio virtual service HTTP match conditions (optional)
# HTTP match conditions (optional)
match:
- uri:
prefix: /
# Istio virtual service HTTP rewrite (optional)
# HTTP rewrite (optional)
rewrite:
uri: /
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
allowMethods:
- GET
allowCredentials: false
allowHeaders:
- x-some-header
maxAge: 24h
```
For the above spec Flagger will generate the following virtual service:
@@ -163,25 +174,37 @@ spec:
- frontend.example.com
- frontend
http:
- match:
- uri:
prefix: /
rewrite:
uri: /
route:
- destination:
host: frontend-primary
port:
number: 9898
weight: 100
- destination:
host: frontend-canary
port:
number: 9898
weight: 0
- appendHeaders:
x-envoy-max-retries: "10"
x-envoy-retry-on: gateway-error,connect-failure,refused-stream
x-envoy-upstream-rq-timeout-ms: "15000"
corsPolicy:
allowHeaders:
- x-some-header
allowMethods:
- GET
allowOrigin:
- example.com
maxAge: 24h
match:
- uri:
prefix: /
rewrite:
uri: /
route:
- destination:
host: frontend-primary
port:
number: 9898
weight: 100
- destination:
host: frontend-canary
port:
number: 9898
weight: 0
```
Flagger keeps in sync the virtual service with the canary service spec. Any direct modification of the virtual
Flagger keeps the virtual service in sync with the canary service spec. Any direct modification to the virtual
service spec will be overwritten.
To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
@@ -228,7 +251,7 @@ and for backend HTTP APIs that are accessible only from inside the mesh.
### Canary Stages
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
At any time you can set `spec.skipAnalysis: true`.
When skip analysis is enabled, Flagger checks if the canary deployment is healthy and
promotes it without analysing it. If an analysis is underway, Flagger cancels it and runs the promotion.
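For example, assuming a canary named `podinfo` in the `test` namespace, you could toggle the field in place with a merge patch:
```bash
kubectl -n test patch canary/podinfo \
  --type=merge \
  -p '{"spec":{"skipAnalysis":true}}'
```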
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
You can enable A/B testing by specifying the HTTP match conditions and the number of iterations:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# total number of iterations
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome).*Safari.*"
- headers:
cookie:
regex: "^(.*?;)?(user=test)(;.*)?$"
```
If Flagger finds an HTTP match condition, it will ignore the `maxWeight` and `stepWeight` settings.
The above configuration will run an analysis for ten minutes targeting Safari users and those that have a test cookie.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
interval * iterations
```
And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:
```
interval * threshold
```
Make sure that the analysis threshold is lower than the number of iterations.
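For the example above (a 1m interval, 10 iterations and a threshold of 2) that works out to:
```
promotion: 1m * 10 = 10 minutes (minimum)
rollback:  1m * 2  =  2 minutes (maximum)
```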
### HTTP Metrics
The canary analysis is using the following Prometheus queries:
@@ -422,6 +488,37 @@ The above configuration validates the canary by checking
if the HTTP 404 req/sec percentage is below 5 percent of the total traffic.
If the 404s rate reaches the 5% threshold, then the canary fails.
```yaml
canaryAnalysis:
threshold: 1
maxWeight: 50
stepWeight: 5
metrics:
- name: "rpc error rate"
threshold: 5
query: |
100 - sum(
rate(
grpc_server_handled_total{
grpc_service="my.TestService",
grpc_code!="OK"
}[1m]
)
)
/
sum(
rate(
grpc_server_started_total{
grpc_service="my.TestService"
}[1m]
)
) * 100
```
The above configuration validates the canary by checking if the percentage of
non-OK GRPC req/sec is below 5 percent of the total requests. If the non-OK
rate reaches the 5% threshold, then the canary fails.
When specifying a query, Flagger will run the PromQL query and convert the result to float64.
Then it compares the query result value with the metric threshold value.
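Before adding a custom query to the canary spec, it can help to sanity-check it against the Prometheus HTTP API; the service name and namespace below are illustrative and depend on where your metrics server runs:
```bash
# in one terminal, forward the metrics server used by Flagger
kubectl -n istio-system port-forward svc/prometheus 9090:9090

# in another terminal, run the query and inspect the returned value
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=sum(rate(istio_requests_total{response_code="404"}[1m]))'
```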
@@ -482,12 +579,12 @@ Flagger metric checks will fail with "no values found for metric istio_requests_
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-load-testing.png)
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
@@ -499,8 +596,7 @@ Or by using Helm:
helm repo add flagger https://flagger.app
helm upgrade -i flagger-loadtester flagger/loadtester \
--namepace=test \
--set cmd.logOutput=true \
--namespace=test \
--set cmd.timeout=1h
```
@@ -514,11 +610,13 @@ webhooks:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
```
@@ -535,6 +633,7 @@ webhooks:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```
@@ -547,3 +646,32 @@ FROM quay.io/stefanprodan/flagger-loadtester:<VER>
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```
### Load Testing Delegation
The load tester can also forward testing tasks to external tools. Currently only [nGrinder](https://github.com/naver/ngrinder)
is supported.
To use this feature, add a load test task of type 'ngrinder' to the canary analysis spec:
```yaml
webhooks:
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
# type of this load test task, cmd or ngrinder
type: ngrinder
# base url of your nGrinder controller server
server: http://ngrinder-server:port
# id of the test to clone from, the test must have been defined.
clone: 100
# user name and base64 encoded password to authenticate against the nGrinder server
username: admin
passwd: YWRtaW4=
# the interval between nGrinder test status polls, defaults to 1s
pollInterval: 5s
```
When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
to the nGrinder server and start a new performance test. The load tester will periodically poll the nGrinder server
for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops.

View File

@@ -0,0 +1,188 @@
# Flagger install on AWS
This guide walks you through setting up Flagger and AWS App Mesh on EKS.
### App Mesh
The App Mesh integration with EKS is made up of the following components:
* Kubernetes custom resources
* `mesh.appmesh.k8s.aws` defines a logical boundary for network traffic between the services
* `virtualnode.appmesh.k8s.aws` defines a logical pointer to a Kubernetes workload
* `virtualservice.appmesh.k8s.aws` defines the routing rules for a workload inside the mesh
* CRD controller - keeps the custom resources in sync with the App Mesh control plane
* Admission controller - injects the Envoy sidecar and assigns Kubernetes pods to App Mesh virtual nodes
* Metrics server - Prometheus instance that collects and stores Envoy's metrics
Prerequisites:
* jq
* homebrew
* openssl
* kubectl
* AWS CLI (default region us-west-2)
### Create a Kubernetes cluster
In order to create an EKS cluster you can use [eksctl](https://eksctl.io).
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon;
it's a Kubernetes-native tool written in Go.
On MacOS you can install eksctl with Homebrew:
```bash
brew tap weaveworks/tap
brew install weaveworks/tap/eksctl
```
Create an EKS cluster:
```bash
eksctl create cluster --name=appmesh \
--region=us-west-2 \
--appmesh-access
```
The above command will create a two-node cluster with the App Mesh
[IAM policy](https://docs.aws.amazon.com/app-mesh/latest/userguide/MESH_IAM_user_policies.html)
attached to the EKS node instance role.
Verify the install with:
```bash
kubectl get nodes
```
### Install Helm
Install the [Helm](https://docs.helm.sh/using_helm/#installing-helm) command-line tool:
```text
brew install kubernetes-helm
```
Create a service account and a cluster role binding for Tiller:
```bash
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:tiller
```
Deploy Tiller in the `kube-system` namespace:
```bash
helm init --service-account tiller
```
You should consider using SSL between Helm and Tiller, for more information on securing your Helm
installation see [docs.helm.sh](https://docs.helm.sh/using_helm/#securing-your-helm-installation).
### Enable horizontal pod auto-scaling
Install the Horizontal Pod Autoscaler (HPA) metrics provider:
```bash
helm upgrade -i metrics-server stable/metrics-server \
--namespace kube-system
```
After a minute, the metrics API should report CPU and memory usage for pods.
You can verify the metrics API with:
```bash
kubectl -n kube-system top pods
```
### Install the App Mesh components
Run the App Mesh installer:
```bash
curl -fsSL https://git.io/get-app-mesh-eks.sh | bash -
```
The installer does the following:
* creates the `appmesh-system` namespace
* generates a certificate signed by Kubernetes CA
* registers the App Mesh mutating webhook
* deploys the App Mesh webhook in `appmesh-system` namespace
* deploys the App Mesh CRDs
* deploys the App Mesh controller in `appmesh-system` namespace
* creates a mesh called `global`
Verify that the global mesh is active:
```bash
kubectl describe mesh
Status:
Mesh Condition:
Status: True
Type: MeshActive
```
### Install Prometheus
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to set up a Prometheus instance to scrape the Envoy sidecars.
Deploy Prometheus in the `appmesh-system` namespace:
```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/eks/appmesh-prometheus.yaml
```
### Install Flagger and Grafana
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh-system:9090
```
You can install Flagger in any namespace as long as it can talk to the App Mesh Prometheus service on port 9090.
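A quick way to check that the metrics endpoint is reachable is to port-forward the Prometheus service deployed earlier and hit its readiness endpoint:
```bash
# in one terminal
kubectl -n appmesh-system port-forward svc/prometheus 9090:9090

# in another terminal
curl -s http://localhost:9090/-/ready
```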
You can enable **Slack** notifications with:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh-system:9090 \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
Deploy Grafana in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://prometheus.appmesh-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n appmesh-system port-forward svc/flagger-grafana 3000:80
```
Now that you have Flagger running you can try the
[App Mesh canary deployments tutorial](https://docs.flagger.app/usage/appmesh-progressive-delivery).

View File

@@ -2,7 +2,7 @@
This guide walks you through setting up Flagger and Istio on Google Kubernetes Engine.
![GKE Cluster Overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-gke-istio.png)
![GKE Cluster Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gke-istio.png)
### Prerequisites
@@ -208,12 +208,12 @@ stable/cert-manager
### Istio Gateway TLS setup
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/istio-cert-manager-gke.png)
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/istio-cert-manager-gke.png)
Create a generic Istio Gateway to expose services outside the mesh on HTTPS:
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gke/istio-gateway.yaml
```
@@ -284,7 +284,7 @@ metadata:
name: istio-gateway
namespace: istio-system
spec:
secretname: istio-ingressgateway-certs
secretName: istio-ingressgateway-certs
issuerRef:
name: letsencrypt-prod
commonName: "*.example.com"
@@ -332,7 +332,7 @@ Because Flagger uses the Istio HTTP metrics to run the canary analysis you have
Prometheus configuration that's similar to the one that comes with the official Istio Helm chart.
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gke/istio-prometheus.yaml
```

View File

@@ -113,7 +113,7 @@ kubectl apply -f $HOME/flagger-grafana.yaml
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:3000
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
### Install Load Tester
@@ -125,15 +125,14 @@ Deploy the load test runner with Helm:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namepace=test \
--set cmd.logOutput=true \
--namespace=test \
--set cmd.timeout=1h
```
Deploy with kubectl:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml

View File

@@ -29,7 +29,7 @@ You can find the chart source [here](https://github.com/stefanprodan/flagger/tre
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
@@ -79,7 +79,7 @@ Flagger will route all traffic to the primary pods and scale to zero the `fronte
Open your browser and navigate to the frontend URL:
![Podinfo Frontend](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend.png)
![Podinfo Frontend](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend.png)
Now let's install the `backend` release without exposing it outside the mesh:
@@ -104,7 +104,7 @@ frontend Initialized 0 2019-02-12T17:50:50Z
Click on the ping button in the `frontend` UI to trigger a HTTP POST request
that will reach the `backend` app:
![Jaeger Tracing](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-jaeger.png)
![Jaeger Tracing](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-jaeger.png)
We'll use the `/echo` endpoint (same as the one the ping button calls)
to generate load on both apps during a canary deployment.
@@ -115,7 +115,7 @@ First let's install a load testing service that will generate traffic during ana
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namepace=test
--namespace=test
```
Enable the load tester and deploy a new `frontend` version:
@@ -155,7 +155,7 @@ You can monitor the canary deployment with Grafana. Open the Flagger dashboard,
select `test` from the namespace dropdown, `frontend-primary` from the primary dropdown and `frontend` from the
canary dropdown.
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-dashboard.png)
Now trigger a canary deployment for the `backend` app, but this time you'll change a value in the configmap:
@@ -213,7 +213,7 @@ Copying backend.test template spec to backend-primary.test
Promotion completed! Scaling down backend.test
```
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-backend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-backend-dashboard.png)
If the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
@@ -237,7 +237,7 @@ In the [GitOps model](https://www.weave.works/technologies/gitops/),
any change to production must be committed in source control
prior to being applied on the cluster. This way rollback and audit logs are provided by Git.
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-flux-gitops.png)
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-flux-gitops.png)
In order to apply the GitOps pipeline model to Flagger canary deployments you'll need
a Git repository with your workloads definitions in YAML format,

View File

@@ -0,0 +1,206 @@
# Zero downtime deployments
This is a list of things you should consider when dealing with a high traffic production environment if you want to
minimise the impact of rolling updates and downscaling.
### Deployment strategy
Limit the number of unavailable pods during a rolling update:
```yaml
apiVersion: apps/v1
kind: Deployment
spec:
progressDeadlineSeconds: 120
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
```
The default progress deadline for a deployment is ten minutes.
You should consider adjusting this value to make the deployment process fail faster.
### Liveness health check
Your application should expose an HTTP endpoint that Kubernetes can call to determine if
your app transitioned to a broken state from which it can't recover and needs to be restarted.
```yaml
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
initialDelaySeconds: 5
```
If you've enabled mTLS, you'll have to use `exec` for liveness and readiness checks since
kubelet is not part of the service mesh and doesn't have access to the TLS cert.
### Readiness health check
Your application should expose an HTTP endpoint that Kubernetes can call to determine if
your app is ready to receive traffic.
```yaml
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/readyz
timeoutSeconds: 5
initialDelaySeconds: 5
periodSeconds: 5
```
If your app depends on external services, you should check if those services are available before allowing Kubernetes
to route traffic to an app instance. Keep in mind that the Envoy sidecar can have a slower startup than your app.
This means that on application start you should retry any external connection for at least a couple of seconds.
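A minimal sketch of such a startup retry, assuming a hypothetical wrapper entrypoint and `BACKEND_HOST`/`BACKEND_PORT` variables (illustrative names, not Flagger or Kubernetes settings):
```bash
#!/bin/sh
# entrypoint.sh - block until an external dependency accepts connections,
# then hand over to the real application process
until nc -z "${BACKEND_HOST}" "${BACKEND_PORT}"; do
  echo "waiting for ${BACKEND_HOST}:${BACKEND_PORT}..."
  sleep 2
done
exec /usr/local/bin/app
```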
### Graceful shutdown
Before a pod gets terminated, Kubernetes sends a `SIGTERM` signal to every container and waits for a period of
time (30s by default) for all containers to exit gracefully. If your app doesn't handle the `SIGTERM` signal or if it
doesn't exit within the grace period, Kubernetes will kill the container and any inflight requests that your app is
processing will fail.
```yaml
apiVersion: apps/v1
kind: Deployment
spec:
template:
spec:
terminationGracePeriodSeconds: 60
containers:
- name: app
lifecycle:
preStop:
exec:
command:
- sleep
- "10"
```
Your app container should have a `preStop` hook that delays the container shutdown.
This will allow the service mesh to drain the traffic and remove this pod from all other Envoy sidecars before your app
becomes unavailable.
### Delay Envoy shutdown
Even if your app reacts to `SIGTERM` and tries to complete the inflight requests before shutdown, that
doesn't mean that the response will make it back to the caller. If the Envoy sidecar shuts down before your app, then
the caller will receive a 503 error.
To mitigate this issue you can add a `preStop` hook to the Istio proxy and wait for the main app to exit before Envoy exits.
```bash
#!/bin/bash
set -e
if ! pidof envoy &>/dev/null; then
exit 0
fi
if ! pidof pilot-agent &>/dev/null; then
exit 0
fi
while [ $(netstat -plunt | grep tcp | grep -v envoy | wc -l | xargs) -ne 0 ]; do
sleep 1;
done
exit 0
```
You'll have to build your own Envoy docker image with the above script and
modify the Istio injection webhook with the `preStop` directive.
Thanks to Stono for his excellent [tips](https://github.com/istio/istio/issues/12183) on minimising 503s.
### Resource requests and limits
Setting CPU and memory requests/limits for all workloads is a mandatory step if you're running a production system.
Without limits your nodes could run out of memory or become unresponsive due to CPU exhaustion.
Without CPU and memory requests,
the Kubernetes scheduler will not be able to make decisions about which nodes to place pods on.
```yaml
apiVersion: apps/v1
kind: Deployment
spec:
template:
spec:
containers:
- name: app
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
```
Note that without resource requests the horizontal pod autoscaler can't determine when to scale your app.
### Autoscaling
A production environment should be able to handle traffic bursts without impacting the quality of service.
This can be achieved with Kubernetes autoscaling capabilities.
Autoscaling in Kubernetes has two dimensions: the Cluster Autoscaler that deals with node scaling operations and
the Horizontal Pod Autoscaler that automatically scales the number of pods in a deployment.
```yaml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: app
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
targetAverageValue: 900m
- type: Resource
resource:
name: memory
targetAverageValue: 768Mi
```
The above HPA ensures your app will be scaled up before the pods reach the CPU or memory limits.
### Ingress retries
To minimise the impact of downscaling operations you can make use of Envoy retry capabilities.
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
spec:
service:
port: 9898
gateways:
- public-gateway.istio-system.svc.cluster.local
hosts:
- app.example.com
appendHeaders:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
```
When the HPA scales down your app, your users could run into 503 errors.
The above configuration will make Envoy retry the HTTP requests that failed due to gateway errors.
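To confirm that the retry headers end up in the generated Istio virtual service, you can inspect it directly (assuming the canary is named `app` and lives in the `test` namespace):
```bash
kubectl -n test get virtualservice app -o yaml | grep -A4 appendHeaders
```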

View File

@@ -0,0 +1,213 @@
# Istio A/B Testing
This guide shows you how to automate A/B testing with Istio and Flagger.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
### Bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/ab-testing/deployment.yaml
kubectl apply -f ${REPO}/artifacts/ab-testing/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# total number of iterations
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome).*Safari.*"
- headers:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"
```
The above configuration will run an analysis for ten minutes targeting Safari users and those that have an insider cookie.
Save the above resource as podinfo-abtest.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-abtest.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/abtest
horizontalpodautoscaler.autoscaling/abtest
canary.flagger.app/abtest
# generated
deployment.apps/abtest-primary
horizontalpodautoscaler.autoscaling/abtest-primary
service/abtest
service/abtest-canary
service/abtest-primary
virtualservice.networking.istio.io/abtest
```
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/abtest \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/abtest
Status:
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected abtest.test
Normal Synced 3m flagger Scaling up abtest.test
Warning Synced 3m flagger Waiting for abtest.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 2m flagger Advance abtest.test canary iteration 4/10
Normal Synced 2m flagger Advance abtest.test canary iteration 5/10
Normal Synced 1m flagger Advance abtest.test canary iteration 6/10
Normal Synced 1m flagger Advance abtest.test canary iteration 7/10
Normal Synced 55s flagger Advance abtest.test canary iteration 8/10
Normal Synced 45s flagger Advance abtest.test canary iteration 9/10
Normal Synced 35s flagger Advance abtest.test canary iteration 10/10
Normal Synced 25s flagger Copying abtest.test template spec to abtest-primary.test
Warning Synced 15s flagger Waiting for abtest-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down abtest.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test abtest Progressing 100 2019-03-16T14:05:07Z
prod frontend Succeeded 0 2019-03-15T16:15:07Z
prod backend Failed 0 2019-03-14T17:05:07Z
```
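While the analysis is running, you can check by hand that only matching requests reach the canary. The commands below are a sketch; they assume app.example.com resolves to your Istio ingress gateway and that the app exposes a version endpoint (adjust the path to whatever your app serves):
```bash
# a request with the insider cookie should be served by the canary
curl -sS -b 'type=insider' http://app.example.com/version

# a request without a matching header or cookie should stay on the primary
curl -sS http://app.example.com/version
```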
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test Flagger's rollback.
Generate HTTP 500 errors:
```bash
watch curl -b 'type=insider' http://app.example.com/status/500
```
Generate latency:
```bash
watch curl -b 'type=insider' http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/abtest
Status:
Failed Checks: 2
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for abtest.test
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 3m flagger Halt abtest.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt abtest.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back abtest.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down abtest.test
```
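To confirm the rollback, you can check that the canary deployment was scaled back to zero:
```bash
# should print 0 after the rollout is marked as failed
kubectl -n test get deployment abtest \
  -o jsonpath='{.spec.replicas}'
```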

View File

@@ -15,12 +15,12 @@ helm upgrade -i flagger flagger/flagger \
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the
maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
### Prometheus Alert Manager

View File

@@ -0,0 +1,286 @@
# App Mesh Canary Deployments
This guide shows you how to use App Mesh and Flagger to automate canary deployments.
You'll need an EKS cluster configured with App Mesh; you can find the install guide
[here](https://docs.flagger.app/install/flagger-install-on-eks-appmesh).
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, App Mesh virtual nodes and services).
These objects expose the application on the mesh and drive the canary analysis and promotion.
The only App Mesh object you need to create by yourself is the mesh resource.
Create a mesh called `global`:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/appmesh/global-mesh.yaml
```
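You can verify that the mesh has been created before moving on. A quick check, assuming the App Mesh controller and its CRDs are installed:
```bash
# list the meshes known to the App Mesh controller
kubectl get meshes.appmesh.k8s.aws

# inspect the status conditions of the global mesh
kubectl describe meshes.appmesh.k8s.aws global
```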
Create a test namespace with App Mesh sidecar injection enabled:
```bash
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/deployment.yaml
kubectl apply -f ${REPO}/artifacts/appmesh/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set meshName=global.appmesh-system \
--set "backends[0]=podinfo.test"
```
Create a canary custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# App Mesh reference
meshName: global.appmesh-system
# App Mesh egress (optional)
backends:
- backend.test
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: envoy_cluster_upstream_rq
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated Kubernetes objects
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
# generated App Mesh objects
virtualnode.appmesh.k8s.aws/podinfo
virtualnode.appmesh.k8s.aws/podinfo-canary
virtualnode.appmesh.k8s.aws/podinfo-primary
virtualservice.appmesh.k8s.aws/podinfo.test
```
The App Mesh specific settings are:
```yaml
service:
port: 9898
meshName: global.appmesh-system
backends:
- backend1.test
- backend2.test
```
App Mesh blocks all egress traffic by default. If your application needs to call another service, you have to create an
App Mesh virtual service for it and add the virtual service name to the backend list.
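Before adding a backend to the list, you can check which App Mesh virtual services already exist in the cluster. A quick check, assuming the controller CRDs from the install guide:
```bash
kubectl get virtualservices.appmesh.k8s.aws --all-namespaces
```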
### Setup App Mesh ingress (optional)
In order to expose the podinfo app outside the mesh, you'll be using an Envoy ingress and an AWS classic load balancer.
The ingress binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
If podinfo becomes unavailable due to HPA downscaling or a node restart,
the ingress will retry the calls for a short period of time.
Deploy the ingress and the AWS ELB service:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/ingress.yaml
```
Find the ingress public address:
```bash
kubectl -n test describe svc/ingress | grep Ingress
LoadBalancer Ingress: yyy-xx.us-west-2.elb.amazonaws.com
```
Export the ingress public address and wait for the ELB to become active:
```bash
export INGRESS_URL=yyy-xx.us-west-2.elb.amazonaws.com
watch curl -sS ${INGRESS_URL}
```
Open your browser and navigate to the ingress address to access the podinfo UI.
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana. The App Mesh dashboard URL is:
http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
![App Mesh Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png)
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-03-16T14:05:07Z
prod frontend Succeeded 0 2019-03-15T16:15:07Z
prod backend Failed 0 2019-03-14T17:05:07Z
```
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
### Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses the rollout.
Trigger a canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```
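If you'd rather not copy the generated pod name by hand, you can look it up by label first. A sketch that assumes the load tester pods carry an app=flagger-loadtester label (adjust the selector to match your release):
```bash
LOADTESTER=$(kubectl -n test get pods -l app=flagger-loadtester \
  -o jsonpath='{.items[0].metadata.name}')
kubectl -n test exec -it ${LOADTESTER} sh
```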
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 http://podinfo.test:9898/status/500
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reaches the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)

View File

@@ -6,15 +6,13 @@ Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
--namespace=istio-system \ # or appmesh-system
--set url=http://prometheus:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
![Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
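To reach the dashboard locally you can port-forward the Grafana service. A sketch, assuming the release name above and that the chart exposes the service on port 80:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```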
### Logging

View File

@@ -1,11 +1,13 @@
# Canary Deployments
# Istio Canary Deployments
This guide shows you how to use Istio and Flagger to automate canary deployments.
### Bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
@@ -109,6 +111,8 @@ service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
@@ -159,6 +163,8 @@ prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:


View File

@@ -0,0 +1,71 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="900px" height="900px" viewBox="0 0 900 900" enable-background="new 0 0 900 900" xml:space="preserve">
<g>
<defs>
<rect id="SVGID_1_" width="900" height="900"/>
</defs>
<clipPath id="SVGID_2_">
<use xlink:href="#SVGID_1_" overflow="visible"/>
</clipPath>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M356.003,268.915L586.001,63.471c-18.262-14.35-38.76-25.978-60.934-34.188
L356.003,180.298V268.915z"/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M289.914,239.333l-62.307,55.655c5.674,23.087,14.884,44.774,27.05,64.454
l35.257-31.493V239.333z"/>
<path clip-path="url(#SVGID_2_)" fill="#40C6F1" d="M479.023,70.412V17.631c-10.554-1.513-21.329-2.338-32.302-2.338
c-11.484,0-22.764,0.871-33.787,2.527v111.626L479.023,70.412z"/>
<path clip-path="url(#SVGID_2_)" fill="#40C6F1" d="M479.023,380.863V159.029l-66.089,59.033v246.046
c11.023,1.655,22.303,2.526,33.787,2.526c13.449,0,26.601-1.244,39.404-3.5l176.98-158.086c6.01-20.321,9.287-41.815,9.287-64.085
c0-10.488-0.774-20.79-2.159-30.898L479.023,380.863z"/>
<path clip-path="url(#SVGID_2_)" fill="#F04E27" d="M479.023,315.734l177.305-158.376c-8.446-21.157-19.978-40.739-34.079-58.175
L479.023,227.118V315.734z"/>
<path clip-path="url(#SVGID_2_)" fill="#F04E27" d="M412.935,286.152l-56.931,50.854V34.328
c-24.657,10.841-46.992,25.96-66.09,44.426v324.42c14.897,14.403,31.742,26.797,50.147,36.688l72.874-65.094V286.152z"/>
<polygon clip-path="url(#SVGID_2_)" fill="#33324B" points="175.804,603.774 207.413,485.167 249.859,485.167 202.897,650.136
152.926,650.136 124.93,539.054 96.933,650.136 46.961,650.136 0,485.167 42.446,485.167 74.055,603.774 105.362,485.167
144.497,485.167 "/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M406.409,595.948c-4.816,32.812-30.103,56.293-73.452,56.293
c-46.059,0-80.076-27.695-80.076-83.686c0-56.293,35.222-85.495,80.076-85.495c49.67,0,75.861,36.426,75.861,85.191v8.73H295.025
c0,18.365,7.526,41.543,37.328,41.543c16.558,0,29.502-8.127,32.814-23.479L406.409,595.948z M365.77,547.784
c-1.506-22.88-16.256-31.308-33.114-31.308c-19.266,0-34.017,11.738-36.124,31.308H365.77z"/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M530.318,543.269v-5.721c0-12.944-5.42-21.374-29.803-21.374
c-30.404,0-31.308,16.859-31.308,22.58h-39.134c0-24.386,10.234-55.693,71.646-55.693c58.702,0,69.238,29.201,69.238,52.982v90.912
c0,6.624,0.301,12.041,0.903,16.256l1.204,6.925h-41.544v-16.557c-11.739,11.738-27.995,18.662-51.176,18.662
c-32.511,0-57.196-18.965-57.196-51.777c0-37.93,35.824-46.059,50.273-48.465L530.318,543.269z M530.318,574.275l-15.955,2.406
c-8.73,1.506-19.267,3.011-25.588,4.215c-6.925,1.205-12.945,2.71-17.46,6.321c-4.215,3.313-6.321,7.829-6.321,13.546
c0,4.818,1.806,19.87,24.385,19.87c13.243,0,24.984-4.816,31.307-11.14c8.128-8.127,9.633-18.362,9.633-26.49V574.275z"/>
<polygon clip-path="url(#SVGID_2_)" fill="#33324B" points="687.764,650.136 632.675,650.136 578.488,485.167 623.041,485.167
660.972,611.303 699.203,485.167 742.552,485.167 "/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M897.591,595.948c-4.816,32.812-30.103,56.293-73.452,56.293
c-46.059,0-80.075-27.695-80.075-83.686c0-56.293,35.222-85.495,80.075-85.495c49.67,0,75.861,36.426,75.861,85.191v8.73H786.208
c0,18.365,7.526,41.543,37.328,41.543c16.558,0,29.502-8.127,32.812-23.479L897.591,595.948z M856.952,547.784
c-1.506-22.88-16.256-31.308-33.114-31.308c-19.266,0-34.017,11.738-36.124,31.308H856.952z"/>
</g>
<g id="flagger1">
<path id="fagger" fill="#566991" d="M138.27,737.32h-30.625v113.925H87.8V737.32H64.77v-16.905H87.8v-22.05
c0-18.62,11.515-29.89,32.095-29.89c8.575,0,20.09,0.489,20.09,0.489v17.15c-1.225,0-10.535-0.245-13.965-0.245
c-10.535,0-18.375,1.225-18.375,14.945v19.6h30.625V737.32z M179.675,851.245h-20.09v-181.3h20.09V851.245z M293.519,800.285
V785.34c0,0-28.42,3.675-42.875,5.88c-13.965,1.96-25.235,6.615-25.235,22.051c0,12.984,8.82,22.784,26.705,22.784
C276.858,836.055,293.519,821.845,293.519,800.285z M239.618,776.521c12.495-2.45,53.9-7.351,53.9-7.351v-7.104
c0-18.13-7.84-26.705-30.87-26.705c-24.745,0-31.605,11.27-32.585,24.5h-20.09c0-16.905,9.555-41.16,52.185-41.16
c44.835,0,51.45,23.275,51.45,44.1v67.375c0,6.125,0.245,15.926,1.715,21.07h-20.335c0,0-0.49-7.105-0.49-17.64
c-6.37,8.33-19.11,19.354-45.815,19.354c-25.235,0-44.1-14.7-44.1-39.2C204.583,786.565,226.388,779.215,239.618,776.521z
M338.271,782.646c0-42.386,24.99-63.945,55.86-63.945c22.295,0,36.015,10.535,40.425,20.58v-18.865h19.845V850.51
c0,25.235-12.25,49.49-58.31,49.49c-34.3,0-55.125-13.72-55.86-42.63h20.335c0,14.945,10.535,25.97,36.015,25.97
c29.399,0,37.729-13.475,37.729-35.035v-22.784c-4.41,10.045-18.62,20.58-40.915,20.58
C362.527,846.101,338.271,825.03,338.271,782.646z M434.312,782.4c0-33.32-14.7-46.55-38.22-46.55
c-20.825,0-37.485,11.76-37.485,46.305s16.66,46.795,37.485,46.795C419.611,828.95,434.312,815.72,434.312,782.4z M478.575,782.646
c0-42.386,24.99-63.945,55.859-63.945c22.296,0,36.016,10.535,40.426,20.58v-18.865h19.845V850.51
c0,25.235-12.25,49.49-58.311,49.49c-34.3,0-55.125-13.72-55.859-42.63h20.335c0,14.945,10.535,25.97,36.015,25.97
c29.4,0,37.73-13.475,37.73-35.035v-22.784c-4.41,10.045-18.62,20.58-40.915,20.58C502.83,846.101,478.575,825.03,478.575,782.646z
M574.615,782.4c0-33.32-14.7-46.55-38.221-46.55c-20.824,0-37.484,11.76-37.484,46.305s16.66,46.795,37.484,46.795
C559.915,828.95,574.615,815.72,574.615,782.4z M714.674,806.165l20.335,0.245c-2.695,23.03-18.62,46.55-56.105,46.55
c-34.79,0-60.025-23.765-60.025-66.885c0-40.67,24.99-67.375,59.78-67.375c36.75,0,57.085,27.439,57.085,70.56h-96.285
c0.49,24.745,10.78,46.795,39.69,46.795C700.463,836.055,712.958,822.825,714.674,806.165z M640.193,773.335h73.745
c-1.96-27.685-17.885-37.729-35.771-37.729C658.078,735.605,642.644,749.08,640.193,773.335z M829.497,740.995h-8.82
c-23.521,0-39.936,8.085-39.936,38.955v71.295h-20.09v-130.83h19.846v22.54c8.574-16.66,25.97-22.54,39.689-22.54h9.311V740.995z"
/>
</g>
</svg>


View File

@@ -22,7 +22,6 @@ SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
flagger:v1alpha3 \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -0,0 +1,5 @@
package appmesh
const (
GroupName = "appmesh.k8s.aws"
)

View File

@@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package
// Package v1beta1 is the v1beta1 version of the API.
// +groupName=appmesh.k8s.aws
package v1beta1

View File

@@ -1,30 +1,15 @@
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
package v1beta1
import (
"github.com/knative/pkg/apis/istio/authentication"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/weaveworks/flagger/pkg/apis/appmesh"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: authentication.GroupName, Version: "v1alpha1"}
var SchemeGroupVersion = schema.GroupVersion{Group: appmesh.GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
@@ -41,11 +26,15 @@ var (
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Policy{},
&PolicyList{},
&Mesh{},
&MeshList{},
&VirtualService{},
&VirtualServiceList{},
&VirtualNode{},
&VirtualNodeList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View File

@@ -0,0 +1,286 @@
package v1beta1
import (
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// App Mesh Custom Resource API types.
// This API follows the conventions described in
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Mesh is a specification for a Mesh resource
type Mesh struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec MeshSpec `json:"spec,omitempty"`
// +optional
Status MeshStatus `json:"status,omitempty"`
}
type MeshServiceDiscoveryType string
const (
Dns MeshServiceDiscoveryType = "Dns"
)
// MeshSpec is the spec for a Mesh resource
type MeshSpec struct {
// +optional
ServiceDiscoveryType *MeshServiceDiscoveryType `json:"serviceDiscoveryType,omitempty"`
}
// MeshStatus is the status for a Mesh resource
type MeshStatus struct {
// MeshArn is the AppMesh Mesh object's Amazon Resource Name
// +optional
MeshArn *string `json:"meshArn,omitempty"`
Conditions []MeshCondition `json:"meshCondition"`
}
type MeshConditionType string
const (
// MeshActive is Active when the Appmesh Mesh has been created or found via the API
MeshActive MeshConditionType = "MeshActive"
)
type MeshCondition struct {
// Type of mesh condition.
Type MeshConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MeshList is a list of Mesh resources
type MeshList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Mesh `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualService is a specification for a VirtualService resource
type VirtualService struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec VirtualServiceSpec `json:"spec,omitempty"`
// +optional
Status VirtualServiceStatus `json:"status,omitempty"`
}
// VirtualServiceSpec is the spec for a VirtualService resource
type VirtualServiceSpec struct {
MeshName string `json:"meshName"`
// +optional
VirtualRouter *VirtualRouter `json:"virtualRouter,omitempty"`
// +optional
Routes []Route `json:"routes,omitempty"`
}
type VirtualRouter struct {
Name string `json:"name"`
}
type Route struct {
Name string `json:"name"`
Http HttpRoute `json:"http"`
}
type HttpRoute struct {
Match HttpRouteMatch `json:"match"`
Action HttpRouteAction `json:"action"`
}
type HttpRouteMatch struct {
Prefix string `json:"prefix"`
}
type HttpRouteAction struct {
WeightedTargets []WeightedTarget `json:"weightedTargets"`
}
type WeightedTarget struct {
VirtualNodeName string `json:"virtualNodeName"`
Weight int64 `json:"weight"`
}
// VirtualServiceStatus is the status for a VirtualService resource
type VirtualServiceStatus struct {
// VirtualServiceArn is the AppMesh VirtualService object's Amazon Resource Name
// +optional
VirtualServiceArn *string `json:"virtualServiceArn,omitempty"`
// VirtualRouterArn is the AppMesh VirtualRouter object's Amazon Resource Name
// +optional
VirtualRouterArn *string `json:"virtualRouterArn,omitempty"`
// RouteArns is a list of AppMesh Route objects' Amazon Resource Names
// +optional
RouteArns []string `json:"routeArns,omitempty"`
Conditions []VirtualServiceCondition `json:"conditions"`
}
type VirtualServiceConditionType string
const (
// VirtualServiceActive is Active when the Appmesh Service has been created or found via the API
VirtualServiceActive VirtualServiceConditionType = "VirtualServiceActive"
VirtualRouterActive VirtualServiceConditionType = "VirtualRouterActive"
RoutesActive VirtualServiceConditionType = "RoutesActive"
VirtualServiceMeshMarkedForDeletion VirtualServiceConditionType = "MeshMarkedForDeletion"
)
type VirtualServiceCondition struct {
// Type of mesh service condition.
Type VirtualServiceConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualServiceList is a list of VirtualService resources
type VirtualServiceList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []VirtualService `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualNode is a specification for a VirtualNode resource
type VirtualNode struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec VirtualNodeSpec `json:"spec,omitempty"`
// +optional
Status VirtualNodeStatus `json:"status,omitempty"`
}
// VirtualNodeSpec is the spec for a VirtualNode resource
type VirtualNodeSpec struct {
MeshName string `json:"meshName"`
// +optional
Listeners []Listener `json:"listeners,omitempty"`
// +optional
ServiceDiscovery *ServiceDiscovery `json:"serviceDiscovery,omitempty"`
// +optional
Backends []Backend `json:"backends,omitempty"`
}
type Listener struct {
PortMapping PortMapping `json:"portMapping"`
}
type PortMapping struct {
Port int64 `json:"port"`
Protocol string `json:"protocol"`
}
type ServiceDiscovery struct {
// +optional
CloudMap *CloudMapServiceDiscovery `json:"cloudMap,omitempty"`
// +optional
Dns *DnsServiceDiscovery `json:"dns,omitempty"`
}
type CloudMapServiceDiscovery struct {
CloudMapServiceName string `json:"cloudMapServiceName"`
}
type DnsServiceDiscovery struct {
HostName string `json:"hostName"`
}
type Backend struct {
VirtualService VirtualServiceBackend `json:"virtualService"`
}
type VirtualServiceBackend struct {
VirtualServiceName string `json:"virtualServiceName"`
}
// VirtualNodeStatus is the status for a VirtualNode resource
type VirtualNodeStatus struct {
MeshArn *string `json:"meshArn,omitempty"`
// VirtualNodeArn is the AppMesh VirtualNode object's Amazon Resource Name
// +optional
VirtualNodeArn *string `json:"virtualNodeArn,omitempty"`
// CloudMapServiceArn is a CloudMap Service object's Amazon Resource Name
// +optional
CloudMapServiceArn *string `json:"cloudMapServiceArn,omitempty"`
// +optional
QueryParameters map[string]string `json:"queryParameters,omitempty"`
Conditions []VirtualNodeCondition `json:"conditions"`
}
type VirtualNodeConditionType string
const (
// VirtualNodeActive is Active when the Appmesh Node has been created or found via the API
VirtualNodeActive VirtualNodeConditionType = "VirtualNodeActive"
VirtualNodeMeshMarkedForDeletion VirtualNodeConditionType = "MeshMarkedForDeletion"
)
type VirtualNodeCondition struct {
// Type of mesh node condition.
Type VirtualNodeConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualNodeList is a list of VirtualNode resources
type VirtualNodeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []VirtualNode `json:"items"`
}

View File

@@ -0,0 +1,717 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backend) DeepCopyInto(out *Backend) {
*out = *in
out.VirtualService = in.VirtualService
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend.
func (in *Backend) DeepCopy() *Backend {
if in == nil {
return nil
}
out := new(Backend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudMapServiceDiscovery) DeepCopyInto(out *CloudMapServiceDiscovery) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMapServiceDiscovery.
func (in *CloudMapServiceDiscovery) DeepCopy() *CloudMapServiceDiscovery {
if in == nil {
return nil
}
out := new(CloudMapServiceDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DnsServiceDiscovery) DeepCopyInto(out *DnsServiceDiscovery) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsServiceDiscovery.
func (in *DnsServiceDiscovery) DeepCopy() *DnsServiceDiscovery {
if in == nil {
return nil
}
out := new(DnsServiceDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRoute) DeepCopyInto(out *HttpRoute) {
*out = *in
out.Match = in.Match
in.Action.DeepCopyInto(&out.Action)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRoute.
func (in *HttpRoute) DeepCopy() *HttpRoute {
if in == nil {
return nil
}
out := new(HttpRoute)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteAction) DeepCopyInto(out *HttpRouteAction) {
*out = *in
if in.WeightedTargets != nil {
in, out := &in.WeightedTargets, &out.WeightedTargets
*out = make([]WeightedTarget, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRouteAction.
func (in *HttpRouteAction) DeepCopy() *HttpRouteAction {
if in == nil {
return nil
}
out := new(HttpRouteAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteMatch) DeepCopyInto(out *HttpRouteMatch) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRouteMatch.
func (in *HttpRouteMatch) DeepCopy() *HttpRouteMatch {
if in == nil {
return nil
}
out := new(HttpRouteMatch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Listener) DeepCopyInto(out *Listener) {
*out = *in
out.PortMapping = in.PortMapping
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
func (in *Listener) DeepCopy() *Listener {
if in == nil {
return nil
}
out := new(Listener)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mesh) DeepCopyInto(out *Mesh) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mesh.
func (in *Mesh) DeepCopy() *Mesh {
if in == nil {
return nil
}
out := new(Mesh)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Mesh) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshCondition) DeepCopyInto(out *MeshCondition) {
*out = *in
if in.LastTransitionTime != nil {
in, out := &in.LastTransitionTime, &out.LastTransitionTime
*out = (*in).DeepCopy()
}
if in.Reason != nil {
in, out := &in.Reason, &out.Reason
*out = new(string)
**out = **in
}
if in.Message != nil {
in, out := &in.Message, &out.Message
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshCondition.
func (in *MeshCondition) DeepCopy() *MeshCondition {
if in == nil {
return nil
}
out := new(MeshCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshList) DeepCopyInto(out *MeshList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Mesh, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshList.
func (in *MeshList) DeepCopy() *MeshList {
if in == nil {
return nil
}
out := new(MeshList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshSpec) DeepCopyInto(out *MeshSpec) {
*out = *in
if in.ServiceDiscoveryType != nil {
in, out := &in.ServiceDiscoveryType, &out.ServiceDiscoveryType
*out = new(MeshServiceDiscoveryType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpec.
func (in *MeshSpec) DeepCopy() *MeshSpec {
if in == nil {
return nil
}
out := new(MeshSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshStatus) DeepCopyInto(out *MeshStatus) {
*out = *in
if in.MeshArn != nil {
in, out := &in.MeshArn, &out.MeshArn
*out = new(string)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]MeshCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshStatus.
func (in *MeshStatus) DeepCopy() *MeshStatus {
if in == nil {
return nil
}
out := new(MeshStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortMapping) DeepCopyInto(out *PortMapping) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping.
func (in *PortMapping) DeepCopy() *PortMapping {
if in == nil {
return nil
}
out := new(PortMapping)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Route) DeepCopyInto(out *Route) {
*out = *in
in.Http.DeepCopyInto(&out.Http)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route.
func (in *Route) DeepCopy() *Route {
if in == nil {
return nil
}
out := new(Route)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDiscovery) DeepCopyInto(out *ServiceDiscovery) {
*out = *in
if in.CloudMap != nil {
in, out := &in.CloudMap, &out.CloudMap
*out = new(CloudMapServiceDiscovery)
**out = **in
}
if in.Dns != nil {
in, out := &in.Dns, &out.Dns
*out = new(DnsServiceDiscovery)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDiscovery.
func (in *ServiceDiscovery) DeepCopy() *ServiceDiscovery {
if in == nil {
return nil
}
out := new(ServiceDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNode) DeepCopyInto(out *VirtualNode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNode.
func (in *VirtualNode) DeepCopy() *VirtualNode {
if in == nil {
return nil
}
out := new(VirtualNode)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualNode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeCondition) DeepCopyInto(out *VirtualNodeCondition) {
*out = *in
if in.LastTransitionTime != nil {
in, out := &in.LastTransitionTime, &out.LastTransitionTime
*out = (*in).DeepCopy()
}
if in.Reason != nil {
in, out := &in.Reason, &out.Reason
*out = new(string)
**out = **in
}
if in.Message != nil {
in, out := &in.Message, &out.Message
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeCondition.
func (in *VirtualNodeCondition) DeepCopy() *VirtualNodeCondition {
if in == nil {
return nil
}
out := new(VirtualNodeCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeList) DeepCopyInto(out *VirtualNodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VirtualNode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeList.
func (in *VirtualNodeList) DeepCopy() *VirtualNodeList {
if in == nil {
return nil
}
out := new(VirtualNodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualNodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeSpec) DeepCopyInto(out *VirtualNodeSpec) {
*out = *in
if in.Listeners != nil {
in, out := &in.Listeners, &out.Listeners
*out = make([]Listener, len(*in))
copy(*out, *in)
}
if in.ServiceDiscovery != nil {
in, out := &in.ServiceDiscovery, &out.ServiceDiscovery
*out = new(ServiceDiscovery)
(*in).DeepCopyInto(*out)
}
if in.Backends != nil {
in, out := &in.Backends, &out.Backends
*out = make([]Backend, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeSpec.
func (in *VirtualNodeSpec) DeepCopy() *VirtualNodeSpec {
if in == nil {
return nil
}
out := new(VirtualNodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeStatus) DeepCopyInto(out *VirtualNodeStatus) {
*out = *in
if in.MeshArn != nil {
in, out := &in.MeshArn, &out.MeshArn
*out = new(string)
**out = **in
}
if in.VirtualNodeArn != nil {
in, out := &in.VirtualNodeArn, &out.VirtualNodeArn
*out = new(string)
**out = **in
}
if in.CloudMapServiceArn != nil {
in, out := &in.CloudMapServiceArn, &out.CloudMapServiceArn
*out = new(string)
**out = **in
}
if in.QueryParameters != nil {
in, out := &in.QueryParameters, &out.QueryParameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]VirtualNodeCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeStatus.
func (in *VirtualNodeStatus) DeepCopy() *VirtualNodeStatus {
if in == nil {
return nil
}
out := new(VirtualNodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualRouter) DeepCopyInto(out *VirtualRouter) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouter.
func (in *VirtualRouter) DeepCopy() *VirtualRouter {
if in == nil {
return nil
}
out := new(VirtualRouter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualService) DeepCopyInto(out *VirtualService) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService.
func (in *VirtualService) DeepCopy() *VirtualService {
if in == nil {
return nil
}
out := new(VirtualService)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualService) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceBackend) DeepCopyInto(out *VirtualServiceBackend) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceBackend.
func (in *VirtualServiceBackend) DeepCopy() *VirtualServiceBackend {
if in == nil {
return nil
}
out := new(VirtualServiceBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceCondition) DeepCopyInto(out *VirtualServiceCondition) {
*out = *in
if in.LastTransitionTime != nil {
in, out := &in.LastTransitionTime, &out.LastTransitionTime
*out = (*in).DeepCopy()
}
if in.Reason != nil {
in, out := &in.Reason, &out.Reason
*out = new(string)
**out = **in
}
if in.Message != nil {
in, out := &in.Message, &out.Message
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceCondition.
func (in *VirtualServiceCondition) DeepCopy() *VirtualServiceCondition {
if in == nil {
return nil
}
out := new(VirtualServiceCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]VirtualService, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList.
func (in *VirtualServiceList) DeepCopy() *VirtualServiceList {
if in == nil {
return nil
}
out := new(VirtualServiceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualServiceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) {
*out = *in
if in.VirtualRouter != nil {
in, out := &in.VirtualRouter, &out.VirtualRouter
*out = new(VirtualRouter)
**out = **in
}
if in.Routes != nil {
in, out := &in.Routes, &out.Routes
*out = make([]Route, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpec.
func (in *VirtualServiceSpec) DeepCopy() *VirtualServiceSpec {
if in == nil {
return nil
}
out := new(VirtualServiceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceStatus) DeepCopyInto(out *VirtualServiceStatus) {
*out = *in
if in.VirtualServiceArn != nil {
in, out := &in.VirtualServiceArn, &out.VirtualServiceArn
*out = new(string)
**out = **in
}
if in.VirtualRouterArn != nil {
in, out := &in.VirtualRouterArn, &out.VirtualRouterArn
*out = new(string)
**out = **in
}
if in.RouteArns != nil {
in, out := &in.RouteArns, &out.RouteArns
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]VirtualServiceCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceStatus.
func (in *VirtualServiceStatus) DeepCopy() *VirtualServiceStatus {
if in == nil {
return nil
}
out := new(VirtualServiceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeightedTarget) DeepCopyInto(out *WeightedTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedTarget.
func (in *WeightedTarget) DeepCopy() *WeightedTarget {
if in == nil {
return nil
}
out := new(WeightedTarget)
in.DeepCopyInto(out)
return out
}

View File

@@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
rollout "github.com/stefanprodan/flagger/pkg/apis/flagger"
rollout "github.com/weaveworks/flagger/pkg/apis/flagger"
)
// SchemeGroupVersion is group version used to register these objects

View File

@@ -17,10 +17,11 @@ limitations under the License.
package v1alpha3
import (
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"time"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"time"
)
const (
@@ -98,6 +99,7 @@ type CanaryStatus struct {
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
Iterations int `json:"iterations"`
// +optional
TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
// +optional
@@ -109,23 +111,32 @@ type CanaryStatus struct {
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
type CanaryService struct {
Port int32 `json:"port"`
Gateways []string `json:"gateways"`
Hosts []string `json:"hosts"`
Match []istiov1alpha3.HTTPMatchRequest `json:"match,omitempty"`
Rewrite *istiov1alpha3.HTTPRewrite `json:"rewrite,omitempty"`
Timeout string `json:"timeout,omitempty"`
Retries *istiov1alpha3.HTTPRetry `json:"retries,omitempty"`
Port int32 `json:"port"`
PortName string `json:"portName,omitempty"`
Match []istiov1alpha3.HTTPMatchRequest `json:"match,omitempty"`
Rewrite *istiov1alpha3.HTTPRewrite `json:"rewrite,omitempty"`
Timeout string `json:"timeout,omitempty"`
Retries *istiov1alpha3.HTTPRetry `json:"retries,omitempty"`
Headers *istiov1alpha3.Headers `json:"headers,omitempty"`
CorsPolicy *istiov1alpha3.CorsPolicy `json:"corsPolicy,omitempty"`
//Istio
Gateways []string `json:"gateways,omitempty"`
Hosts []string `json:"hosts,omitempty"`
// App Mesh
MeshName string `json:"meshName,omitempty"`
Backends []string `json:"backends,omitempty"`
}
// CanaryAnalysis is used to describe how the analysis should be done
type CanaryAnalysis struct {
Interval string `json:"interval"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Webhooks []CanaryWebhook `json:"webhooks,omitempty"`
Interval string `json:"interval"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Webhooks []CanaryWebhook `json:"webhooks,omitempty"`
Match []istiov1alpha3.HTTPMatchRequest `json:"match,omitempty"`
Iterations int `json:"iterations,omitempty"`
}
// CanaryMetric holds the reference to Istio metrics used for canary analysis

View File

@@ -21,7 +21,7 @@ limitations under the License.
package v1alpha3
import (
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
v1 "k8s.io/api/autoscaling/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -69,6 +69,13 @@ func (in *CanaryAnalysis) DeepCopyInto(out *CanaryAnalysis) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Match != nil {
in, out := &in.Match, &out.Match
*out = make([]istiov1alpha3.HTTPMatchRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -134,16 +141,6 @@ func (in *CanaryMetric) DeepCopy() *CanaryMetric {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryService) DeepCopyInto(out *CanaryService) {
*out = *in
if in.Gateways != nil {
in, out := &in.Gateways, &out.Gateways
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Match != nil {
in, out := &in.Match, &out.Match
*out = make([]istiov1alpha3.HTTPMatchRequest, len(*in))
@@ -161,6 +158,31 @@ func (in *CanaryService) DeepCopyInto(out *CanaryService) {
*out = new(istiov1alpha3.HTTPRetry)
**out = **in
}
if in.Headers != nil {
in, out := &in.Headers, &out.Headers
*out = new(istiov1alpha3.Headers)
(*in).DeepCopyInto(*out)
}
if in.CorsPolicy != nil {
in, out := &in.CorsPolicy, &out.CorsPolicy
*out = new(istiov1alpha3.CorsPolicy)
(*in).DeepCopyInto(*out)
}
if in.Gateways != nil {
in, out := &in.Gateways, &out.Gateways
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Backends != nil {
in, out := &in.Backends, &out.Backends
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@@ -0,0 +1,19 @@
package v1alpha1
// Describes how to match a given string in HTTP headers. Match is
// case-sensitive.
type StringMatch struct {
// Specify exactly one of the fields below.
// exact string match
Exact string `json:"exact,omitempty"`
// prefix-based match
Prefix string `json:"prefix,omitempty"`
// suffix-based match.
Suffix string `json:"suffix,omitempty"`
// ECMAscript style regex-based match
Regex string `json:"regex,omitempty"`
}
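For illustration only, each StringMatch value is expected to set exactly one matching mode:

```go
package main

import (
	"fmt"

	"github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
)

func main() {
	// One matching mode per value: Exact, Prefix, Suffix or Regex.
	matches := []v1alpha1.StringMatch{
		{Exact: "insider"},                       // value must equal "insider"
		{Prefix: "/api/"},                        // value must start with /api/
		{Regex: "^(.*?;)?(type=insider)(;.*)?$"}, // ECMAScript-style regular expression
	}
	for _, m := range matches {
		fmt.Printf("%+v\n", m)
	}
}
```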

View File

@@ -0,0 +1,5 @@
package istio
const (
GroupName = "networking.istio.io"
)

View File

@@ -0,0 +1,7 @@
// API versions allow the API contract for a resource to be changed while keeping
// backward compatibility by supporting multiple concurrent versions
// of the same resource
// +k8s:deepcopy-gen=package
// +groupName=networking.istio.io
package v1alpha3

View File

@@ -1,23 +1,7 @@
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"github.com/knative/pkg/apis/istio"
"github.com/weaveworks/flagger/pkg/apis/istio"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -45,11 +29,7 @@ var (
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&VirtualService{},
&Gateway{},
&DestinationRule{},
&VirtualServiceList{},
&GatewayList{},
&DestinationRuleList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View File

@@ -1,23 +1,8 @@
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// proto: https://github.com/istio/api/blob/master/networking/v1alpha3/virtual_service.proto
package v1alpha3
import (
"github.com/knative/pkg/apis/istio/common/v1alpha1"
"github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -32,7 +17,7 @@ type VirtualService struct {
Spec VirtualServiceSpec `json:"spec"`
}
// A VirtualService defines a set of traffic routing rules to apply when a host is
// VirtualServiceSpec defines a set of traffic routing rules to apply when a host is
// addressed. Each routing rule defines matching criteria for traffic of a specific
// protocol. If the traffic is matched, then it is sent to a named destination service
// (or subset/version of it) defined in the registry.
@@ -40,107 +25,264 @@ type VirtualService struct {
// The source of traffic can also be matched in a routing rule. This allows routing
// to be customized for specific client contexts.
//
// The following example routes all HTTP traffic by default to
// The following example on Kubernetes routes all HTTP traffic by default to
// pods of the reviews service with label "version: v1". In addition,
// HTTP requests containing /wpcatalog/, /consumercatalog/ url prefixes will
// be rewritten to /newcatalog and sent to pods with label "version: v2". The
// rules will be applied at the gateway named "bookinfo" as well as at all
// the sidecars in the mesh (indicated by the reserved gateway name
// "mesh").
// HTTP requests with path starting with /wpcatalog/ or /consumercatalog/ will
// be rewritten to /newcatalog and sent to pods with label "version: v2".
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: reviews-route
// spec:
// hosts:
// - reviews
// gateways: # if omitted, defaults to "mesh"
// - bookinfo
// - mesh
// http:
// - match:
// - uri:
// prefix: "/wpcatalog"
// - uri:
// prefix: "/consumercatalog"
// rewrite:
// uri: "/newcatalog"
// route:
// - destination:
// host: reviews
// subset: v2
// - route:
// - destination:
// host: reviews
// subset: v1
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: reviews-route
// spec:
// hosts:
// - reviews.prod.svc.cluster.local
// http:
// - match:
// - uri:
// prefix: "/wpcatalog"
// - uri:
// prefix: "/consumercatalog"
// rewrite:
// uri: "/newcatalog"
// route:
// - destination:
// host: reviews.prod.svc.cluster.local
// subset: v2
// - route:
// - destination:
// host: reviews.prod.svc.cluster.local
// subset: v1
// ```
//
// A subset/version of a route destination is identified with a reference
// to a named service subset which must be declared in a corresponding
// DestinationRule.
// `DestinationRule`.
//
// apiVersion: networking.istio.io/v1alpha3
// kind: DestinationRule
// metadata:
// name: reviews-destination
// spec:
// host: reviews
// subsets:
// - name: v1
// labels:
// version: v1
// - name: v2
// labels:
// version: v2
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: DestinationRule
// metadata:
// name: reviews-destination
// spec:
// host: reviews.prod.svc.cluster.local
// subsets:
// - name: v1
// labels:
// version: v1
// - name: v2
// labels:
// version: v2
// ```
//
// A host name can be defined by only one VirtualService. A single
// VirtualService can be used to describe traffic properties for multiple
// HTTP and TCP ports.
type VirtualServiceSpec struct {
// REQUIRED. The destination address for traffic captured by this virtual
// service. Could be a DNS name with wildcard prefix or a CIDR
// prefix. Depending on the platform, short-names can also be used
// instead of a FQDN (i.e. has no dots in the name). In such a scenario,
// the FQDN of the host would be derived based on the underlying
// platform.
// REQUIRED. The destination hosts to which traffic is being sent. Could
// be a DNS name with wildcard prefix or an IP address. Depending on the
// platform, short-names can also be used instead of a FQDN (i.e. has no
// dots in the name). In such a scenario, the FQDN of the host would be
// derived based on the underlying platform.
//
// For example on Kubernetes, when hosts contains a short name, Istio will
// interpret the short name based on the namespace of the rule. Thus, when a
// client namespace applies a rule in the "default" namespace containing a name
// "reviews", Istio will set up routes to the "reviews.default.svc.cluster.local"
// service. However, if a different name such as "reviews.sales.svc.cluster.local"
// is used, it would be treated as a FQDN during virtual host matching.
// In Consul, a plain service name would be resolved to the FQDN
// "reviews.service.consul".
// **A host name can be defined by only one VirtualService**. A single
// VirtualService can be used to describe traffic properties for multiple
// HTTP and TCP ports.
//
// Note that the hosts field applies to both HTTP and TCP
// services. Services inside the mesh, i.e., those found in the service
// registry, must always be referred to using their alphanumeric
// names. IP addresses or CIDR prefixes are allowed only for services
// defined via the Gateway.
// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
// instead of "reviews.default.svc.cluster.local"), Istio will interpret
// the short name based on the namespace of the rule, not the service. A
// rule in the "default" namespace containing a host "reviews" will be
// interpreted as "reviews.default.svc.cluster.local", irrespective of
// the actual namespace associated with the reviews service. _To avoid
// potential misconfigurations, it is recommended to always use fully
// qualified domain names over short names._
//
// The hosts field applies to both HTTP and TCP services. Services inside
// the mesh, i.e., those found in the service registry, must always be
// referred to using their alphanumeric names. IP addresses are allowed
// only for services defined via the Gateway.
Hosts []string `json:"hosts"`
// The names of gateways and sidecars that should apply these routes. A
// single VirtualService is used for sidecars inside the mesh as well
// as for one or more gateways. The selection condition imposed by this field
// can be overridden using the source field in the match conditions of HTTP/TCP
// routes. The reserved word "mesh" is used to imply all the sidecars in
// the mesh. When this field is omitted, the default gateway ("mesh")
// will be used, which would apply the rule to all sidecars in the
// mesh. If a list of gateway names is provided, the rules will apply
// only to the gateways. To apply the rules to both gateways and sidecars,
// specify "mesh" as one of the gateway names.
// single VirtualService is used for sidecars inside the mesh as well as
// for one or more gateways. The selection condition imposed by this
// field can be overridden using the source field in the match conditions
// of protocol-specific routes. The reserved word `mesh` is used to imply
// all the sidecars in the mesh. When this field is omitted, the default
// gateway (`mesh`) will be used, which would apply the rule to all
// sidecars in the mesh. If a list of gateway names is provided, the
// rules will apply only to the gateways. To apply the rules to both
// gateways and sidecars, specify `mesh` as one of the gateway names.
Gateways []string `json:"gateways,omitempty"`
// An ordered list of route rules for HTTP traffic.
// The first rule matching an incoming request is used.
// An ordered list of route rules for HTTP traffic. HTTP routes will be
// applied to platform service ports named 'http-*'/'http2-*'/'grpc-*', gateway
// ports with protocol HTTP/HTTP2/GRPC/ TLS-terminated-HTTPS and service
// entry ports using HTTP/HTTP2/GRPC protocols. The first rule matching
// an incoming request is used.
Http []HTTPRoute `json:"http,omitempty"`
// An ordered list of route rules for TCP traffic.
// The first rule matching an incoming request is used.
// An ordered list of route rules for opaque TCP traffic. TCP routes will
// be applied to any port that is not a HTTP or TLS port. The first rule
// matching an incoming request is used.
Tcp []TCPRoute `json:"tcp,omitempty"`
}
// Destination indicates the network addressable service to which the
// request/connection will be sent after processing a routing rule. The
// destination.host should unambiguously refer to a service in the service
// registry. Istio's service registry is composed of all the services found
// in the platform's service registry (e.g., Kubernetes services, Consul
// services), as well as services declared through the
// [ServiceEntry](#ServiceEntry) resource.
//
// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
// instead of "reviews.default.svc.cluster.local"), Istio will interpret
// the short name based on the namespace of the rule, not the service. A
// rule in the "default" namespace containing a host "reviews" will be
// interpreted as "reviews.default.svc.cluster.local", irrespective of the
// actual namespace associated with the reviews service. _To avoid potential
// misconfigurations, it is recommended to always use fully qualified
// domain names over short names._
//
// The following Kubernetes example routes all traffic by default to pods
// of the reviews service with label "version: v1" (i.e., subset v1), and
// some to subset v2.
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: reviews-route
// namespace: foo
// spec:
// hosts:
// - reviews # interpreted as reviews.foo.svc.cluster.local
// http:
// - match:
// - uri:
// prefix: "/wpcatalog"
// - uri:
// prefix: "/consumercatalog"
// rewrite:
// uri: "/newcatalog"
// route:
// - destination:
// host: reviews # interpreted as reviews.foo.svc.cluster.local
// subset: v2
// - route:
// - destination:
// host: reviews # interpreted as reviews.foo.svc.cluster.local
// subset: v1
// ```
//
// And the associated DestinationRule
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: DestinationRule
// metadata:
// name: reviews-destination
// namespace: foo
// spec:
// host: reviews # interpreted as reviews.foo.svc.cluster.local
// subsets:
// - name: v1
// labels:
// version: v1
// - name: v2
// labels:
// version: v2
// ```
//
// The following VirtualService sets a timeout of 5s for all calls to
// productpage.prod.svc.cluster.local service in Kubernetes. Notice that
// there are no subsets defined in this rule. Istio will fetch all
// instances of productpage.prod.svc.cluster.local service from the service
// registry and populate the sidecar's load balancing pool. Also, notice
// that this rule is set in the istio-system namespace but uses the fully
// qualified domain name of the productpage service,
// productpage.prod.svc.cluster.local. Therefore the rule's namespace does
// not have an impact in resolving the name of the productpage service.
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: my-productpage-rule
// namespace: istio-system
// spec:
// hosts:
// - productpage.prod.svc.cluster.local # ignores rule namespace
// http:
// - timeout: 5s
// route:
// - destination:
// host: productpage.prod.svc.cluster.local
// ```
//
// To control routing for traffic bound to services outside the mesh, external
// services must first be added to Istio's internal service registry using the
// ServiceEntry resource. VirtualServices can then be defined to control traffic
// bound to these external services. For example, the following rules define a
// ServiceEntry for wikipedia.org and set a timeout of 5s for HTTP requests.
//
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: ServiceEntry
// metadata:
// name: external-svc-wikipedia
// spec:
// hosts:
// - wikipedia.org
// location: MESH_EXTERNAL
// ports:
// - number: 80
// name: example-http
// protocol: HTTP
// resolution: DNS
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: my-wiki-rule
// spec:
// hosts:
// - wikipedia.org
// http:
// - timeout: 5s
// route:
// - destination:
// host: wikipedia.org
// ```
type Destination struct {
// REQUIRED. The name of a service from the service registry. Service
// names are looked up from the platform's service registry (e.g.,
// Kubernetes services, Consul services, etc.) and from the hosts
// declared by [ServiceEntry](#ServiceEntry). Traffic forwarded to
// destinations that are not found in either of the two, will be dropped.
//
// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
// instead of "reviews.default.svc.cluster.local"), Istio will interpret
// the short name based on the namespace of the rule, not the service. A
// rule in the "default" namespace containing a host "reviews" will be
// interpreted as "reviews.default.svc.cluster.local", irrespective of
// the actual namespace associated with the reviews service. _To avoid
// potential misconfigurations, it is recommended to always use fully
// qualified domain names over short names._
Host string `json:"host"`
// The name of a subset within the service. Applicable only to services
// within the mesh. The subset must be defined in a corresponding
// DestinationRule.
Subset string `json:"subset,omitempty"`
// Specifies the port on the host that is being addressed. If a service
// exposes only a single port it is not required to explicitly select the
// port.
Port PortSelector `json:"port,omitempty"`
}
// Describes match conditions and actions for routing HTTP/1.1, HTTP2, and
// gRPC traffic. See VirtualService for usage examples.
type HTTPRoute struct {
@@ -166,15 +308,6 @@ type HTTPRoute struct {
// Redirect primitive. Rewrite will be performed before forwarding.
Rewrite *HTTPRewrite `json:"rewrite,omitempty"`
// Indicates that a HTTP/1.1 client connection to this particular route
// should be allowed (and expected) to upgrade to a WebSocket connection.
// The default is false. Istio's reference sidecar implementation (Envoy)
// expects the first request to this route to contain the WebSocket
// upgrade headers. Otherwise, the request will be rejected. Note that
// Websocket allows secondary protocol negotiation which may then be
// subject to further routing rules based on the protocol selected.
WebsocketUpgrade bool `json:"websocketUpgrade,omitempty"`
// Timeout for HTTP requests.
Timeout string `json:"timeout,omitempty"`
@@ -192,12 +325,44 @@ type HTTPRoute struct {
// destination.
Mirror *Destination `json:"mirror,omitempty"`
// Cross-Origin Resource Sharing policy (CORS). Refer to
// https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
// for further details about cross origin resource sharing.
CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"`
// Additional HTTP headers to add before forwarding a request to the
// destination service.
AppendHeaders map[string]string `json:"appendHeaders,omitempty"`
// HTTP headers to remove before returning the response to the caller
RemoveResponseHeaders map[string]string `json:"removeResponseHeaders,omitempty"`
// Header manipulation rules
Headers *Headers `json:"headers,omitempty"`
}
// Header manipulation rules
type Headers struct {
// Header manipulation rules to apply before forwarding a request
// to the destination service
Request *HeaderOperations `json:"request,omitempty"`
// Header manipulation rules to apply before returning a response
// to the caller
Response *HeaderOperations `json:"response,omitempty"`
}
// HeaderOperations describes the header manipulations to apply
type HeaderOperations struct {
// Overwrite the headers specified by key with the given values
Set map[string]string `json:"set"`
// Append the given values to the headers specified by keys
// (will create a comma-separated list of values)
Add map[string]string `json:"add"`
// Remove the specified headers
Remove []string `json:"remove"`
}
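As a rough sketch (header names and values are made up), the new Headers/HeaderOperations pair groups request and response manipulations in one place:

```go
package main

import (
	"fmt"

	istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
)

func main() {
	headers := istiov1alpha3.Headers{
		// Applied before the request is forwarded to the destination.
		Request: &istiov1alpha3.HeaderOperations{
			Set:    map[string]string{"x-envoy-upstream-rq-timeout-ms": "15000"},
			Add:    map[string]string{"x-request-source": "flagger"},
			Remove: []string{"x-debug"},
		},
		// Applied before the response is returned to the caller.
		Response: &istiov1alpha3.HeaderOperations{
			Remove: []string{"server"},
		},
	}
	fmt.Printf("request set: %v\n", headers.Request.Set)
}
```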
// HttpMatchRequest specifies a set of criteria to be met in order for the
@@ -283,6 +448,22 @@ type HTTPMatchRequest struct {
//
// **Note:** The keys `uri`, `scheme`, `method`, and `authority` will be ignored.
Headers map[string]v1alpha1.StringMatch `json:"headers,omitempty"`
// Specifies the ports on the host that is being addressed. Many services
// only expose a single port, or label ports with the protocols they support;
// in these cases it is not required to explicitly select the port.
Port uint32 `json:"port,omitempty"`
// One or more labels that constrain the applicability of a rule to
// workloads with the given labels. If the VirtualService has a list of
// gateways specified at the top, it should include the reserved gateway
// `mesh` in order for this field to be applicable.
SourceLabels map[string]string `json:"sourceLabels,omitempty"`
// Names of gateways where the rule should be applied to. Gateway names
// at the top of the VirtualService (if any) are overridden. The gateway match is
// independent of sourceLabels.
Gateways []string `json:"gateways,omitempty"`
}
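A brief, hypothetical example of the newly added match fields; port selects a specific service port, while sourceLabels and gateways constrain where the rule applies:

```go
package main

import (
	"fmt"

	istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
)

func main() {
	match := istiov1alpha3.HTTPMatchRequest{
		// Only traffic addressed to port 9898 is considered.
		Port: 9898,
		// Only traffic originating from workloads with these labels matches.
		SourceLabels: map[string]string{"app": "frontend"},
		// Overrides any gateways listed at the top of the VirtualService.
		Gateways: []string{"mesh"},
	}
	fmt.Printf("%+v\n", match)
}
```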
type DestinationWeight struct {
@@ -297,137 +478,6 @@ type DestinationWeight struct {
Weight int `json:"weight"`
}
// Destination indicates the network addressable service to which the
// request/connection will be sent after processing a routing rule. The
// destination.name should unambiguously refer to a service in the service
// registry. It can be a short name or a fully qualified domain name from
// the service registry, a resolvable DNS name, an IP address or a service
// name from the service registry and a subset name. The order of inference
// is as follows:
//
// 1. Service registry lookup. The entire name is looked up in the service
// registry. If the lookup succeeds, the search terminates. The requests
// will be routed to any instance of the service in the mesh. When the
// service name consists of a single word, the FQDN will be constructed in
// a platform specific manner. For example, in Kubernetes, the namespace
// associated with the routing rule will be used to identify the service as
// <servicename>.<rulenamespace>. However, if the service name contains
// multiple words separated by a dot (e.g., reviews.prod), the name in its
// entirety would be looked up in the service registry.
//
// 2. Runtime DNS lookup by the proxy. If step 1 fails, and the name is not
// an IP address, it will be considered as a DNS name that is not in the
// service registry (e.g., wikipedia.org). The sidecar/gateway will resolve
// the DNS and load balance requests appropriately. See Envoy's strict_dns
// for details.
//
// The following example routes all traffic by default to pods of the
// reviews service with label "version: v1" (i.e., subset v1), and some
// to subset v2, in a kubernetes environment.
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: reviews-route
// spec:
// hosts:
// - reviews # namespace is same as the client/caller's namespace
// http:
// - match:
// - uri:
// prefix: "/wpcatalog"
// - uri:
// prefix: "/consumercatalog"
// rewrite:
// uri: "/newcatalog"
// route:
// - destination:
// host: reviews
// subset: v2
// - route:
// - destination:
// host: reviews
// subset: v1
//
// And the associated DestinationRule
//
// apiVersion: networking.istio.io/v1alpha3
// kind: DestinationRule
// metadata:
// name: reviews-destination
// spec:
// host: reviews
// subsets:
// - name: v1
// labels:
// version: v1
// - name: v2
// labels:
// version: v2
//
// The following VirtualService sets a timeout of 5s for all calls to
// productpage.prod service. Notice that there are no subsets defined in
// this rule. Istio will fetch all instances of productpage.prod service
// from the service registry and populate the sidecar's load balancing
// pool.
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: my-productpage-rule
// spec:
// hosts:
// - productpage.prod # in kubernetes, this applies only to prod namespace
// http:
// - timeout: 5s
// route:
// - destination:
// host: productpage.prod
//
// The following sets a timeout of 5s for all calls to the external
// service wikipedia.org, as there is no internal service of that name.
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: my-wiki-rule
// spec:
// hosts:
// - wikipedia.org
// http:
// - timeout: 5s
// route:
// - destination:
// host: wikipedia.org
//
type Destination struct {
// REQUIRED. The name of a service from the service registry. Service
// names are looked up from the platform's service registry (e.g.,
// Kubernetes services, Consul services, etc.) and from the hosts
// declared by [ServiceEntry](#ServiceEntry). Traffic forwarded to
// destinations that are not found in either of the two, will be dropped.
//
// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
// instead of "reviews.default.svc.cluster.local"), Istio will interpret
// the short name based on the namespace of the rule, not the service. A
// rule in the "default" namespace containing a host "reviews" will be
// interpreted as "reviews.default.svc.cluster.local", irrespective of
// the actual namespace associated with the reviews service. _To avoid
// potential misconfigurations, it is recommended to always use fully
// qualified domain names over short names._
Host string `json:"host"`
// The name of a subset within the service. Applicable only to services
// within the mesh. The subset must be defined in a corresponding
// DestinationRule.
Subset string `json:"subset,omitempty"`
// Specifies the port on the host that is being addressed. If a service
// exposes only a single port it is not required to explicitly select the
// port.
Port PortSelector `json:"port,omitempty"`
}
// PortSelector specifies the number of a port to be used for
// matching or selection for final routing.
type PortSelector struct {
@@ -578,21 +628,24 @@ type HTTPRewrite struct {
// example, the following rule sets the maximum number of retries to 3 when
// calling ratings:v1 service, with a 2s timeout per retry attempt.
//
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: ratings-route
// spec:
// hosts:
// - ratings
// http:
// - route:
// - destination:
// host: ratings
// subset: v1
// retries:
// attempts: 3
// perTryTimeout: 2s
// ```yaml
// apiVersion: networking.istio.io/v1alpha3
// kind: VirtualService
// metadata:
// name: ratings-route
// spec:
// hosts:
// - ratings.prod.svc.cluster.local
// http:
// - route:
// - destination:
// host: ratings.prod.svc.cluster.local
// subset: v1
// retries:
// attempts: 3
// perTryTimeout: 2s
// retryOn: gateway-error,connect-failure,refused-stream
// ```
//
type HTTPRetry struct {
// REQUIRED. Number of retries for a given request. The interval
@@ -602,6 +655,13 @@ type HTTPRetry struct {
// Timeout per retry attempt for a given request. format: 1h/1m/1s/1ms. MUST BE >=1ms.
PerTryTimeout string `json:"perTryTimeout"`
// Specifies the conditions under which retry takes place.
// One or more policies can be specified using a comma-delimited list.
// The supported policies can be found in
// <https://www.envoyproxy.io/docs/envoy/latest/configuration/http_filters/router_filter#x-envoy-retry-on>
// and <https://www.envoyproxy.io/docs/envoy/latest/configuration/http_filters/router_filter#x-envoy-retry-grpc-on>
RetryOn string `json:"retryOn"`
}
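A minimal sketch of a retry policy using the new RetryOn field; the Attempts field is assumed from the unchanged part of the type, and the policy names follow the Envoy documentation linked above:

```go
package main

import (
	"fmt"

	istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
)

func main() {
	retries := istiov1alpha3.HTTPRetry{
		Attempts:      3,    // assumed required field: retries per request
		PerTryTimeout: "2s", // must be >= 1ms
		RetryOn:       "gateway-error,connect-failure,refused-stream",
	}
	fmt.Printf("%+v\n", retries)
}
```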
// Describes the Cross-Origin Resource Sharing (CORS) policy, for a given

View File

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Knative Authors
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,57 +21,10 @@ limitations under the License.
package v1alpha3
import (
v1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1"
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionPoolSettings) DeepCopyInto(out *ConnectionPoolSettings) {
*out = *in
if in.Tcp != nil {
in, out := &in.Tcp, &out.Tcp
*out = new(TCPSettings)
**out = **in
}
if in.Http != nil {
in, out := &in.Http, &out.Http
*out = new(HTTPSettings)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolSettings.
func (in *ConnectionPoolSettings) DeepCopy() *ConnectionPoolSettings {
if in == nil {
return nil
}
out := new(ConnectionPoolSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConsistentHashLB) DeepCopyInto(out *ConsistentHashLB) {
*out = *in
if in.HttpCookie != nil {
in, out := &in.HttpCookie, &out.HttpCookie
*out = new(HTTPCookie)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistentHashLB.
func (in *ConsistentHashLB) DeepCopy() *ConsistentHashLB {
if in == nil {
return nil
}
out := new(ConsistentHashLB)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CorsPolicy) DeepCopyInto(out *CorsPolicy) {
*out = *in
@@ -125,94 +78,6 @@ func (in *Destination) DeepCopy() *Destination {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DestinationRule) DeepCopyInto(out *DestinationRule) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRule.
func (in *DestinationRule) DeepCopy() *DestinationRule {
if in == nil {
return nil
}
out := new(DestinationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DestinationRule) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DestinationRuleList) DeepCopyInto(out *DestinationRuleList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DestinationRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleList.
func (in *DestinationRuleList) DeepCopy() *DestinationRuleList {
if in == nil {
return nil
}
out := new(DestinationRuleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DestinationRuleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DestinationRuleSpec) DeepCopyInto(out *DestinationRuleSpec) {
*out = *in
if in.TrafficPolicy != nil {
in, out := &in.TrafficPolicy, &out.TrafficPolicy
*out = new(TrafficPolicy)
(*in).DeepCopyInto(*out)
}
if in.Subsets != nil {
in, out := &in.Subsets, &out.Subsets
*out = make([]Subset, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleSpec.
func (in *DestinationRuleSpec) DeepCopy() *DestinationRuleSpec {
if in == nil {
return nil
}
out := new(DestinationRuleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DestinationWeight) DeepCopyInto(out *DestinationWeight) {
*out = *in
@@ -230,112 +95,6 @@ func (in *DestinationWeight) DeepCopy() *DestinationWeight {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Gateway) DeepCopyInto(out *Gateway) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway.
func (in *Gateway) DeepCopy() *Gateway {
if in == nil {
return nil
}
out := new(Gateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Gateway) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewayList) DeepCopyInto(out *GatewayList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Gateway, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList.
func (in *GatewayList) DeepCopy() *GatewayList {
if in == nil {
return nil
}
out := new(GatewayList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GatewayList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) {
*out = *in
if in.Servers != nil {
in, out := &in.Servers, &out.Servers
*out = make([]Server, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec.
func (in *GatewaySpec) DeepCopy() *GatewaySpec {
if in == nil {
return nil
}
out := new(GatewaySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPCookie) DeepCopyInto(out *HTTPCookie) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCookie.
func (in *HTTPCookie) DeepCopy() *HTTPCookie {
if in == nil {
return nil
}
out := new(HTTPCookie)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPFaultInjection) DeepCopyInto(out *HTTPFaultInjection) {
*out = *in
@@ -392,6 +151,18 @@ func (in *HTTPMatchRequest) DeepCopyInto(out *HTTPMatchRequest) {
(*out)[key] = val
}
}
if in.SourceLabels != nil {
in, out := &in.SourceLabels, &out.SourceLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Gateways != nil {
in, out := &in.Gateways, &out.Gateways
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@@ -493,6 +264,11 @@ func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
*out = new(Destination)
**out = **in
}
if in.CorsPolicy != nil {
in, out := &in.CorsPolicy, &out.CorsPolicy
*out = new(CorsPolicy)
(*in).DeepCopyInto(*out)
}
if in.AppendHeaders != nil {
in, out := &in.AppendHeaders, &out.AppendHeaders
*out = make(map[string]string, len(*in))
@@ -507,6 +283,11 @@ func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
(*out)[key] = val
}
}
if in.Headers != nil {
in, out := &in.Headers, &out.Headers
*out = new(Headers)
(*in).DeepCopyInto(*out)
}
return
}
@@ -521,17 +302,62 @@ func (in *HTTPRoute) DeepCopy() *HTTPRoute {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPSettings) DeepCopyInto(out *HTTPSettings) {
func (in *HeaderOperations) DeepCopyInto(out *HeaderOperations) {
*out = *in
if in.Set != nil {
in, out := &in.Set, &out.Set
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Add != nil {
in, out := &in.Add, &out.Add
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Remove != nil {
in, out := &in.Remove, &out.Remove
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSettings.
func (in *HTTPSettings) DeepCopy() *HTTPSettings {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOperations.
func (in *HeaderOperations) DeepCopy() *HeaderOperations {
if in == nil {
return nil
}
out := new(HTTPSettings)
out := new(HeaderOperations)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Headers) DeepCopyInto(out *Headers) {
*out = *in
if in.Request != nil {
in, out := &in.Request, &out.Request
*out = new(HeaderOperations)
(*in).DeepCopyInto(*out)
}
if in.Response != nil {
in, out := &in.Response, &out.Response
*out = new(HeaderOperations)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Headers.
func (in *Headers) DeepCopy() *Headers {
if in == nil {
return nil
}
out := new(Headers)
in.DeepCopyInto(out)
return out
}
@@ -596,59 +422,6 @@ func (in *L4MatchAttributes) DeepCopy() *L4MatchAttributes {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerSettings) DeepCopyInto(out *LoadBalancerSettings) {
*out = *in
if in.ConsistentHash != nil {
in, out := &in.ConsistentHash, &out.ConsistentHash
*out = new(ConsistentHashLB)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSettings.
func (in *LoadBalancerSettings) DeepCopy() *LoadBalancerSettings {
if in == nil {
return nil
}
out := new(LoadBalancerSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection.
func (in *OutlierDetection) DeepCopy() *OutlierDetection {
if in == nil {
return nil
}
out := new(OutlierDetection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Port) DeepCopyInto(out *Port) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port.
func (in *Port) DeepCopy() *Port {
if in == nil {
return nil
}
out := new(Port)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortSelector) DeepCopyInto(out *PortSelector) {
*out = *in
@@ -665,98 +438,6 @@ func (in *PortSelector) DeepCopy() *PortSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortTrafficPolicy) DeepCopyInto(out *PortTrafficPolicy) {
*out = *in
out.Port = in.Port
if in.LoadBalancer != nil {
in, out := &in.LoadBalancer, &out.LoadBalancer
*out = new(LoadBalancerSettings)
(*in).DeepCopyInto(*out)
}
if in.ConnectionPool != nil {
in, out := &in.ConnectionPool, &out.ConnectionPool
*out = new(ConnectionPoolSettings)
(*in).DeepCopyInto(*out)
}
if in.OutlierDetection != nil {
in, out := &in.OutlierDetection, &out.OutlierDetection
*out = new(OutlierDetection)
**out = **in
}
if in.Tls != nil {
in, out := &in.Tls, &out.Tls
*out = new(TLSSettings)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortTrafficPolicy.
func (in *PortTrafficPolicy) DeepCopy() *PortTrafficPolicy {
if in == nil {
return nil
}
out := new(PortTrafficPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Server) DeepCopyInto(out *Server) {
*out = *in
out.Port = in.Port
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server.
func (in *Server) DeepCopy() *Server {
if in == nil {
return nil
}
out := new(Server)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subset) DeepCopyInto(out *Subset) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TrafficPolicy != nil {
in, out := &in.TrafficPolicy, &out.TrafficPolicy
*out = new(TrafficPolicy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subset.
func (in *Subset) DeepCopy() *Subset {
if in == nil {
return nil
}
out := new(Subset)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPRoute) DeepCopyInto(out *TCPRoute) {
*out = *in
@@ -781,107 +462,6 @@ func (in *TCPRoute) DeepCopy() *TCPRoute {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPSettings) DeepCopyInto(out *TCPSettings) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSettings.
func (in *TCPSettings) DeepCopy() *TCPSettings {
if in == nil {
return nil
}
out := new(TCPSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSOptions) DeepCopyInto(out *TLSOptions) {
*out = *in
if in.SubjectAltNames != nil {
in, out := &in.SubjectAltNames, &out.SubjectAltNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSOptions.
func (in *TLSOptions) DeepCopy() *TLSOptions {
if in == nil {
return nil
}
out := new(TLSOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSSettings) DeepCopyInto(out *TLSSettings) {
*out = *in
if in.SubjectAltNames != nil {
in, out := &in.SubjectAltNames, &out.SubjectAltNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSettings.
func (in *TLSSettings) DeepCopy() *TLSSettings {
if in == nil {
return nil
}
out := new(TLSSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficPolicy) DeepCopyInto(out *TrafficPolicy) {
*out = *in
if in.LoadBalancer != nil {
in, out := &in.LoadBalancer, &out.LoadBalancer
*out = new(LoadBalancerSettings)
(*in).DeepCopyInto(*out)
}
if in.ConnectionPool != nil {
in, out := &in.ConnectionPool, &out.ConnectionPool
*out = new(ConnectionPoolSettings)
(*in).DeepCopyInto(*out)
}
if in.OutlierDetection != nil {
in, out := &in.OutlierDetection, &out.OutlierDetection
*out = new(OutlierDetection)
**out = **in
}
if in.Tls != nil {
in, out := &in.Tls, &out.Tls
*out = new(TLSSettings)
(*in).DeepCopyInto(*out)
}
if in.PortLevelSettings != nil {
in, out := &in.PortLevelSettings, &out.PortLevelSettings
*out = make([]PortTrafficPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPolicy.
func (in *TrafficPolicy) DeepCopy() *TrafficPolicy {
if in == nil {
return nil
}
out := new(TrafficPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualService) DeepCopyInto(out *VirtualService) {
*out = *in

View File

@@ -19,7 +19,9 @@ limitations under the License.
package versioned
import (
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -27,16 +29,35 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
AppmeshV1beta1() appmeshv1beta1.AppmeshV1beta1Interface
// Deprecated: please explicitly pick a version if possible.
Appmesh() appmeshv1beta1.AppmeshV1beta1Interface
FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
Networking() networkingv1alpha3.NetworkingV1alpha3Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
appmeshV1beta1 *appmeshv1beta1.AppmeshV1beta1Client
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
}
// AppmeshV1beta1 retrieves the AppmeshV1beta1Client
func (c *Clientset) AppmeshV1beta1() appmeshv1beta1.AppmeshV1beta1Interface {
return c.appmeshV1beta1
}
// Deprecated: Appmesh retrieves the default version of AppmeshClient.
// Please explicitly pick a version.
func (c *Clientset) Appmesh() appmeshv1beta1.AppmeshV1beta1Interface {
return c.appmeshV1beta1
}
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
@@ -50,6 +71,17 @@ func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
return c.flaggerV1alpha3
}
// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
return c.networkingV1alpha3
}
// Deprecated: Networking retrieves the default version of NetworkingClient.
// Please explicitly pick a version.
func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
return c.networkingV1alpha3
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
@@ -66,10 +98,18 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
}
var cs Clientset
var err error
cs.appmeshV1beta1, err = appmeshv1beta1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.flaggerV1alpha3, err = flaggerv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
@@ -82,7 +122,9 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.appmeshV1beta1 = appmeshv1beta1.NewForConfigOrDie(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -91,7 +133,9 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.appmeshV1beta1 = appmeshv1beta1.New(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
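A sketch of how the regenerated clientset could be used once the App Mesh group is wired in; the kubeconfig path and mesh name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// The clientset now exposes three typed API groups.
	if _, err := client.AppmeshV1beta1().Meshes().Get("global", metav1.GetOptions{}); err != nil {
		log.Fatal(err)
	}
	_ = client.FlaggerV1alpha3()
	_ = client.NetworkingV1alpha3()
	fmt.Println("mesh lookup succeeded")
}
```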

View File

@@ -19,9 +19,13 @@ limitations under the License.
package fake
import (
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
fakeflaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
fakeappmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1/fake"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
fakeflaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
fakenetworkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -71,6 +75,16 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
var _ clientset.Interface = &Clientset{}
// AppmeshV1beta1 retrieves the AppmeshV1beta1Client
func (c *Clientset) AppmeshV1beta1() appmeshv1beta1.AppmeshV1beta1Interface {
return &fakeappmeshv1beta1.FakeAppmeshV1beta1{Fake: &c.Fake}
}
// Appmesh retrieves the AppmeshV1beta1Client
func (c *Clientset) Appmesh() appmeshv1beta1.AppmeshV1beta1Interface {
return &fakeappmeshv1beta1.FakeAppmeshV1beta1{Fake: &c.Fake}
}
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
@@ -80,3 +94,13 @@ func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}
// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
}
// Networking retrieves the NetworkingV1alpha3Client
func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
}
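A rough sketch of exercising the fake clientset in tests; with no seeded objects, a Get is expected to return a not-found error from the in-memory tracker:

```go
package main

import (
	"fmt"
	"log"

	"github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// NewSimpleClientset is the standard client-gen fake constructor.
	client := fake.NewSimpleClientset()

	_, err := client.AppmeshV1beta1().Meshes().Get("global", metav1.GetOptions{})
	if !errors.IsNotFound(err) {
		log.Fatalf("expected not-found, got: %v", err)
	}
	fmt.Println("fake Meshes client returned not-found as expected")
}
```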

View File

@@ -19,7 +19,9 @@ limitations under the License.
package fake
import (
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +52,7 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
appmeshv1beta1.AddToScheme(scheme)
flaggerv1alpha3.AddToScheme(scheme)
networkingv1alpha3.AddToScheme(scheme)
}

View File

@@ -19,7 +19,9 @@ limitations under the License.
package scheme
import (
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +52,7 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
appmeshv1beta1.AddToScheme(scheme)
flaggerv1alpha3.AddToScheme(scheme)
networkingv1alpha3.AddToScheme(scheme)
}
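For illustration, registering all three groups into a fresh scheme (SchemeGroupVersion for the appmesh group is referenced from the generated apis package):

```go
package main

import (
	"fmt"

	appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
	"github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()
	// Registers appmesh.k8s.aws/v1beta1, flagger v1alpha3 and
	// networking.istio.io/v1alpha3 types, mirroring the generated init().
	scheme.AddToScheme(s)

	fmt.Println(s.IsVersionRegistered(appmeshv1beta1.SchemeGroupVersion))
}
```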

View File

@@ -1,5 +1,5 @@
/*
Copyright 2018 The Knative Authors
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,31 +16,41 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
"github.com/knative/pkg/client/clientset/versioned/scheme"
v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
"github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type AuthenticationV1alpha1Interface interface {
type AppmeshV1beta1Interface interface {
RESTClient() rest.Interface
PoliciesGetter
MeshesGetter
VirtualNodesGetter
VirtualServicesGetter
}
// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.istio.io group.
type AuthenticationV1alpha1Client struct {
// AppmeshV1beta1Client is used to interact with features provided by the appmesh.k8s.aws group.
type AppmeshV1beta1Client struct {
restClient rest.Interface
}
func (c *AuthenticationV1alpha1Client) Policies(namespace string) PolicyInterface {
return newPolicies(c, namespace)
func (c *AppmeshV1beta1Client) Meshes() MeshInterface {
return newMeshes(c)
}
// NewForConfig creates a new AuthenticationV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) {
func (c *AppmeshV1beta1Client) VirtualNodes(namespace string) VirtualNodeInterface {
return newVirtualNodes(c, namespace)
}
func (c *AppmeshV1beta1Client) VirtualServices(namespace string) VirtualServiceInterface {
return newVirtualServices(c, namespace)
}
// NewForConfig creates a new AppmeshV1beta1Client for the given config.
func NewForConfig(c *rest.Config) (*AppmeshV1beta1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@@ -49,12 +59,12 @@ func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) {
if err != nil {
return nil, err
}
return &AuthenticationV1alpha1Client{client}, nil
return &AppmeshV1beta1Client{client}, nil
}
// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and
// NewForConfigOrDie creates a new AppmeshV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client {
func NewForConfigOrDie(c *rest.Config) *AppmeshV1beta1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@@ -62,13 +72,13 @@ func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client {
return client
}
// New creates a new AuthenticationV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *AuthenticationV1alpha1Client {
return &AuthenticationV1alpha1Client{c}
// New creates a new AppmeshV1beta1Client for the given RESTClient.
func New(c rest.Interface) *AppmeshV1beta1Client {
return &AppmeshV1beta1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
gv := v1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
@@ -82,7 +92,7 @@ func setConfigDefaults(config *rest.Config) error {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface {
func (c *AppmeshV1beta1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}

View File

@@ -1,5 +1,5 @@
/*
Copyright 2018 The Knative Authors
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1
package v1beta1

View File

@@ -1,5 +1,5 @@
/*
Copyright 2018 The Knative Authors
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,5 +1,5 @@
/*
Copyright 2018 The Knative Authors
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,22 +19,30 @@ limitations under the License.
package fake
import (
v1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
v1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeAuthenticationV1alpha1 struct {
type FakeAppmeshV1beta1 struct {
*testing.Fake
}
func (c *FakeAuthenticationV1alpha1) Policies(namespace string) v1alpha1.PolicyInterface {
return &FakePolicies{c, namespace}
func (c *FakeAppmeshV1beta1) Meshes() v1beta1.MeshInterface {
return &FakeMeshes{c}
}
func (c *FakeAppmeshV1beta1) VirtualNodes(namespace string) v1beta1.VirtualNodeInterface {
return &FakeVirtualNodes{c, namespace}
}
func (c *FakeAppmeshV1beta1) VirtualServices(namespace string) v1beta1.VirtualServiceInterface {
return &FakeVirtualServices{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeAuthenticationV1alpha1) RESTClient() rest.Interface {
func (c *FakeAppmeshV1beta1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}
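
A hedged testing sketch for the fake group client above. It assumes the standard client-gen versioned fake clientset (fake.NewSimpleClientset in github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake) was generated alongside these typed fakes, which is not shown in this diff, and that the clientset exposes the group via AppmeshV1beta1(). It demonstrates forcing an error by prepending a reactor on the shared testing.Fake.

package main

import (
	"errors"
	"fmt"

	"github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	// The fake clientset embeds testing.Fake, so reactors registered here are
	// shared by FakeAppmeshV1beta1 and its typed fakes (FakeMeshes, etc.).
	client := fake.NewSimpleClientset()

	// Force every GET on meshes to fail, e.g. to exercise error-handling paths.
	client.PrependReactor("get", "meshes",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("injected failure")
		})

	// "global" is an illustrative mesh name.
	_, err := client.AppmeshV1beta1().Meshes().Get("global", metav1.GetOptions{})
	fmt.Println(err) // injected failure
}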

View File

@@ -0,0 +1,131 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeMeshes implements MeshInterface
type FakeMeshes struct {
Fake *FakeAppmeshV1beta1
}
var meshesResource = schema.GroupVersionResource{Group: "appmesh.k8s.aws", Version: "v1beta1", Resource: "meshes"}
var meshesKind = schema.GroupVersionKind{Group: "appmesh.k8s.aws", Version: "v1beta1", Kind: "Mesh"}
// Get takes name of the mesh, and returns the corresponding mesh object, and an error if there is any.
func (c *FakeMeshes) Get(name string, options v1.GetOptions) (result *v1beta1.Mesh, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootGetAction(meshesResource, name), &v1beta1.Mesh{})
if obj == nil {
return nil, err
}
return obj.(*v1beta1.Mesh), err
}
// List takes label and field selectors, and returns the list of Meshes that match those selectors.
func (c *FakeMeshes) List(opts v1.ListOptions) (result *v1beta1.MeshList, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootListAction(meshesResource, meshesKind, opts), &v1beta1.MeshList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1beta1.MeshList{ListMeta: obj.(*v1beta1.MeshList).ListMeta}
for _, item := range obj.(*v1beta1.MeshList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested meshes.
func (c *FakeMeshes) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewRootWatchAction(meshesResource, opts))
}
// Create takes the representation of a mesh and creates it. Returns the server's representation of the mesh, and an error, if there is any.
func (c *FakeMeshes) Create(mesh *v1beta1.Mesh) (result *v1beta1.Mesh, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootCreateAction(meshesResource, mesh), &v1beta1.Mesh{})
if obj == nil {
return nil, err
}
return obj.(*v1beta1.Mesh), err
}
// Update takes the representation of a mesh and updates it. Returns the server's representation of the mesh, and an error, if there is any.
func (c *FakeMeshes) Update(mesh *v1beta1.Mesh) (result *v1beta1.Mesh, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateAction(meshesResource, mesh), &v1beta1.Mesh{})
if obj == nil {
return nil, err
}
return obj.(*v1beta1.Mesh), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeMeshes) UpdateStatus(mesh *v1beta1.Mesh) (*v1beta1.Mesh, error) {
obj, err := c.Fake.
Invokes(testing.NewRootUpdateSubresourceAction(meshesResource, "status", mesh), &v1beta1.Mesh{})
if obj == nil {
return nil, err
}
return obj.(*v1beta1.Mesh), err
}
// Delete takes name of the mesh and deletes it. Returns an error if one occurs.
func (c *FakeMeshes) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewRootDeleteAction(meshesResource, name), &v1beta1.Mesh{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeMeshes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewRootDeleteCollectionAction(meshesResource, listOptions)
_, err := c.Fake.Invokes(action, &v1beta1.MeshList{})
return err
}
// Patch applies the patch and returns the patched mesh.
func (c *FakeMeshes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Mesh, err error) {
obj, err := c.Fake.
Invokes(testing.NewRootPatchSubresourceAction(meshesResource, name, data, subresources...), &v1beta1.Mesh{})
if obj == nil {
return nil, err
}
return obj.(*v1beta1.Mesh), err
}
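
And a short sketch of the in-memory behaviour the FakeMeshes actions above provide, under the same assumptions as the previous example (generated versioned fake clientset, AppmeshV1beta1() accessor) plus the assumption that the Mesh type embeds metav1.ObjectMeta, as is standard for client-gen CRD types; the mesh name is illustrative.

package main

import (
	"fmt"
	"log"

	appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
	"github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Objects passed to NewSimpleClientset seed the fake's object tracker,
	// so Get/List/Delete work without a real API server.
	client := fake.NewSimpleClientset(&appmeshv1beta1.Mesh{
		ObjectMeta: metav1.ObjectMeta{Name: "global"},
	})

	// Served by the NewRootGetAction shown in FakeMeshes.Get above.
	mesh, err := client.AppmeshV1beta1().Meshes().Get("global", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("found mesh:", mesh.Name)

	// Deletes are tracked in memory as well.
	if err := client.AppmeshV1beta1().Meshes().Delete("global", &metav1.DeleteOptions{}); err != nil {
		log.Fatal(err)
	}
}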

Some files were not shown because too many files have changed in this diff.