Compare commits

...

127 Commits

Author SHA1 Message Date
Stefan Prodan
1cca5a455b Merge pull request #422 from weaveworks/prep-0.23.0
Release v0.23.0
2020-02-06 15:06:23 +02:00
stefanprodan
1b651500a1 Release v0.23.0 2020-02-06 14:49:04 +02:00
Stefan Prodan
e457b6d35c Merge pull request #420 from ta924/manualrollback
Add support for gated rollback
2020-02-06 13:48:32 +02:00
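For context on the feature merged here, a minimal sketch of a gated rollback webhook, assuming the analysis polls a rollback gate; the loadtester URL below is illustrative, not taken from this changeset:

```yaml
# Minimal sketch of a manual rollback gate (hypothetical URL); when the gate
# reports open, Flagger rolls the canary back instead of promoting it.
canaryAnalysis:
  webhooks:
    - name: rollback-gate
      type: rollback   # webhook type added to the CRD enum in this changeset
      url: http://flagger-loadtester.test/rollback/check
```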
Tanner Altares
402dda71e6 manual push to trigger build 2020-02-05 19:17:45 -06:00
Tanner Altares
69e969ac51 modify the hook name 2020-02-05 14:49:35 -06:00
Tanner Altares
edbc373109 add docs for manual rollback 2020-02-05 14:14:13 -06:00
Tanner Altares
1d23c0f0a2 update CRD manifest to add rollback enum to webhook validation 2020-02-05 10:29:32 -06:00
Tanner Altares
fa950e1a48 support gated rollback 2020-01-30 15:11:59 -06:00
Stefan Prodan
e31ecbedf0 Merge pull request #416 from weaveworks/service-name
Implement service name override
2020-01-28 21:22:41 +02:00
stefanprodan
b982c9e2ae Fix service pod selector 2020-01-26 18:52:15 +02:00
stefanprodan
3766c843fe Add service name field to docs 2020-01-26 13:00:07 +02:00
stefanprodan
e00d9962d6 Use service name override in Kubernetes e2e tests 2020-01-26 12:59:51 +02:00
stefanprodan
940e547e88 Implement service name override
Use targetRef.name as the Kubernetes service name prefix only if service name is not specified
Warn about routing conflicts when service name changes
2020-01-26 12:48:49 +02:00
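A sketch of the `service.name` override implemented above; the field placement matches the README and CRD diffs later in this compare, and the names are illustrative:

```yaml
# If service.name is omitted, targetRef.name is used as the Kubernetes
# service name prefix; changing the name can cause routing conflicts.
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    name: podinfo   # optional service name override
    port: 9898
```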
stefanprodan
e3ecebc9ae Add service name field to Canary CRD 2020-01-26 12:46:08 +02:00
stefanprodan
c38bd144e4 Update Kubernetes packages to v1.17.1 2020-01-25 12:51:44 +02:00
Stefan Prodan
2be6f3d678 Merge pull request #412 from weaveworks/prep-release-0.22.0
Release v0.22.0
2020-01-16 19:50:25 +02:00
stefanprodan
3d7091a56b Use Kubernetes v1.17.0 in e2e tests 2020-01-16 19:33:17 +02:00
stefanprodan
1f0305949e Update Prometheus to v2.15.2 2020-01-16 14:48:06 +02:00
stefanprodan
1332db85c5 Add selector-labels example to docs
Fix: #403
2020-01-16 14:38:50 +02:00
stefanprodan
1f06ec838d Release Flagger v0.22.0 2020-01-16 14:32:33 +02:00
Stefan Prodan
308351918c Merge pull request #411 from weaveworks/contour-up
Update Contour to v1.1 and add Linkerd header
2020-01-16 14:22:51 +02:00
stefanprodan
558a1fc6e6 Add Linkerd l5d-dst-override header to Contour routes 2020-01-16 11:26:02 +02:00
stefanprodan
bc3256e1c5 Update Contour to v1.1 2020-01-16 11:08:55 +02:00
Stefan Prodan
6eaf421f98 Merge pull request #409 from weaveworks/event-webhook
Implement event dispatching webhook
2020-01-16 11:02:32 +02:00
stefanprodan
1271f12d3f Add the event webhook type to docs 2020-01-15 14:29:51 +02:00
stefanprodan
4776b1d285 Implement events dispatching for the event webhook type 2020-01-15 14:12:22 +02:00
stefanprodan
e4dc923299 Add event webhook type to CRD 2020-01-15 14:10:38 +02:00
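A sketch of a per-canary `event` webhook as added to the CRD above; the receiver URL is a hypothetical example:

```yaml
# Flagger POSTs canary events to webhooks of type "event".
canaryAnalysis:
  webhooks:
    - name: send-to-audit-log
      type: event   # webhook type added to the CRD enum in this changeset
      url: http://event-recorder.test/webhook
```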
Stefan Prodan
98ba38d436 Merge pull request #408 from weaveworks/e2e-updates
e2e: Update Kubernetes Kind to v0.7.0
2020-01-15 13:27:14 +02:00
stefanprodan
9d765feb38 Remove deprecated Kind command from e2e 2020-01-14 13:12:54 +02:00
stefanprodan
7e6a70bdbf Update Kubernetes Kind to v0.7.0 2020-01-14 12:55:20 +02:00
Stefan Prodan
455ec1b6e7 Merge pull request #407 from weaveworks/istio-1.4
Update Istio e2e to v1.4.3
2020-01-14 12:48:12 +02:00
Stefan Prodan
3b152a370f Merge pull request #406 from weaveworks/kube-1.17
Update Kubernetes packages to 1.17
2020-01-13 16:03:40 +02:00
stefanprodan
8d7d5e6810 Update Istio e2e to v1.4.3 2020-01-11 20:59:00 +02:00
stefanprodan
8dc4c03258 Update Kubernetes packages to 1.17 2020-01-11 18:24:31 +02:00
Stefan Prodan
0082b3307b Merge pull request #401 from mrparkers/event-webhook
adds general purpose event webhook
2020-01-11 17:54:32 +02:00
Michael Parker
b1a9c33d36 add docs 2020-01-09 16:11:03 -06:00
Michael Parker
6e06cf1074 use unix timestamp ms 2020-01-09 16:10:56 -06:00
Michael Parker
8d61e6f893 rename 2020-01-09 14:26:53 -06:00
Michael Parker
9c71e70a0a webhook tests 2020-01-09 14:25:43 -06:00
Michael Parker
91395ea1ab deepcopy canary for failed notification 2020-01-09 11:05:22 -06:00
Michael Parker
0894304fce use canary copy for new revision notification 2020-01-09 10:45:13 -06:00
Michael Parker
9cfa0ac43f update event payload schema 2020-01-07 11:11:52 -06:00
Michael Parker
1d5029d607 Merge branch 'event-webhook' of github.com:mrparkers/flagger into event-webhook 2020-01-07 09:39:13 -06:00
Michael Parker
e6d1880c93 use correct event type 2020-01-07 09:38:14 -06:00
Michael Parker
6da533090a Update controller.go 2020-01-06 19:12:39 -06:00
Michael Parker
17efcaa6d1 update helm chart 2020-01-06 16:35:52 -06:00
Michael Parker
38dfda9d8f add event-webhook command line flag 2020-01-06 16:35:42 -06:00
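The flag above pairs with the chart's `eventWebhook` value (see the Helm chart diffs below); a sketch of enabling it globally, with a hypothetical receiver URL:

```yaml
# values.yaml: maps to the -event-webhook command line flag added here.
eventWebhook: "http://event-recorder.test/webhook"
```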
stefanprodan
0abc254ef2 Add Contour TLS guide to docs 2020-01-06 16:29:04 +02:00
Stefan Prodan
db427b5e54 Merge pull request #400 from weaveworks/release-0.21.0
Release 0.21.0
2020-01-06 10:23:46 +00:00
stefanprodan
b49d63bdfe Update e2e tests to Linkerd 2.6.1 2020-01-06 12:02:53 +02:00
stefanprodan
c84f7addff Release 0.21.0 2020-01-06 11:43:48 +02:00
Stefan Prodan
5d72398925 Merge pull request #397 from weaveworks/contour
Add support for Contour ingress controller
2020-01-06 08:08:47 +00:00
stefanprodan
11d16468c9 Add Contour TLS guide link to docs 2019-12-29 13:36:55 +02:00
Stefan Prodan
82b61d69b7 Merge pull request #399 from int128/pod-monitor
Add PodMonitor template to flagger chart
2019-12-24 14:35:39 +02:00
Hidetake Iwata
824391321f Add PodMonitor template to flagger chart 2019-12-24 12:55:40 +09:00
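A sketch of enabling the new PodMonitor through Helm values; the keys come from the chart values diff in this compare, the namespace is illustrative, and the Prometheus operator CRDs are assumed to be installed:

```yaml
podMonitor:
  enabled: true
  namespace: monitoring   # defaults to the release namespace when empty
  interval: 15s
  additionalLabels: {}
```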
stefanprodan
a7c242e437 Add user agent match examples to Contour docs 2019-12-20 18:26:18 +02:00
stefanprodan
1544610203 Add Contour e2e test for canary rollback 2019-12-20 14:38:06 +02:00
stefanprodan
14ca775ed9 Set Contour namespace in kustomization 2019-12-20 14:33:03 +02:00
stefanprodan
f1d29f5951 Set Contour idle timeout to 5m 2019-12-20 14:32:24 +02:00
stefanprodan
ad0a66ffcc Add Contour usage docs and diagrams 2019-12-20 11:47:44 +02:00
stefanprodan
4288fa261c Add Contour reference to docs 2019-12-20 11:47:00 +02:00
stefanprodan
a537637dc9 Add Flagger Kustomize installer for Contour 2019-12-20 11:46:23 +02:00
stefanprodan
851c6701b3 Add unit tests for Contour prefix, timeout and retries 2019-12-19 19:06:47 +02:00
stefanprodan
bb4591106a Add Contour URL prefix 2019-12-19 18:48:31 +02:00
stefanprodan
7641190ecb Add Contour timeout and retry policies 2019-12-19 18:27:35 +02:00
stefanprodan
02b579f128 Add unit tests for Contour routes 2019-12-19 15:30:53 +02:00
stefanprodan
9cf6b407f1 Add unit tests for Contour router reconciliation 2019-12-19 15:15:02 +02:00
stefanprodan
c3564176f8 Add unit tests for Contour observer 2019-12-19 12:41:39 +02:00
stefanprodan
ae9cf57fd5 Add e2e tests for Contour header routing 2019-12-19 12:22:57 +02:00
stefanprodan
ae63b01373 Implement Contour A/B testing 2019-12-19 12:02:20 +02:00
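A sketch of Contour A/B testing as implemented above, assuming Flagger's usual header-match syntax; the user-agent regex mirrors the match examples added to the Contour docs:

```yaml
# Route matching requests to the canary for a fixed number of iterations.
canaryAnalysis:
  iterations: 10
  match:
    - headers:
        user-agent:
          regex: ".*Firefox.*"
```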
stefanprodan
c066a9163b Set HTTPProxy status on init 2019-12-19 09:58:32 +02:00
stefanprodan
38b04f2690 Add Contour canary e2e tests 2019-12-19 09:38:23 +02:00
stefanprodan
ee0e7b091a Implement Contour router for traffic shifting 2019-12-18 19:29:17 +02:00
stefanprodan
e922c3e9d9 Add Contour metrics 2019-12-18 19:29:17 +02:00
stefanprodan
2c31a4bf90 Add Contour CRD to Flagger RBAC 2019-12-18 19:29:17 +02:00
stefanprodan
7332e6b173 Add Contour HTTPProxy CRD and clientset 2019-12-18 19:29:17 +02:00
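Putting the Contour commits together, a minimal Canary using the new provider; the provider value appears in the README diff below, and the target names are illustrative:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: contour
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
```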
Stefan Prodan
968d67a7c3 Merge pull request #386 from mumoshu/envoy-canary-analysis
feat: Support for canary analysis on deployments and services behind Envoy
2019-12-18 19:22:18 +02:00
Yusuke Kuoka
266b957fc6 Fix CrossoverServiceObserver's ID 2019-12-18 22:11:21 +09:00
Yusuke Kuoka
357ef86c8b Differentiate AppMesh observer vs Crossover observer
So as not to break the App Mesh integration.
2019-12-18 22:03:30 +09:00
Yusuke Kuoka
d75ade5e8c Fix envoy dashboard, scheduler, and envoy metrics provider to correctly pass canary analysis and show graphs 2019-12-18 10:55:49 +09:00
Yusuke Kuoka
806b95c8ce Do send http requests only to canary for canary analysis 2019-12-18 09:06:22 +09:00
Yusuke Kuoka
bf58cd763f Do use correct envoy metrics for canary analysis 2019-12-18 09:05:37 +09:00
Yusuke Kuoka
52856177e3 Fix trafficsplits api version for envoy+crossover 2019-12-18 09:03:41 +09:00
Yusuke Kuoka
58c3cebaac Fix the dashboard and the steps to browse it 2019-12-17 20:18:33 +09:00
Yusuke Kuoka
1e5d05c3fc Improve Envoy/Crossover installation experience with the chart registry 2019-12-17 17:02:50 +09:00
Yusuke Kuoka
020129bf5c Fix misconfiguration 2019-12-17 15:45:16 +09:00
Stefan Prodan
3ff0786e1f Merge pull request #394 from weaveworks/helm-tester-v3.0.1
Update Helm tester to Helm v3.0.1
2019-12-17 08:21:57 +02:00
stefanprodan
a60dc55dad Update Helm tester to Helm v3.0.1 2019-12-17 00:10:11 +02:00
Stefan Prodan
ff6acae544 Merge pull request #391 from weaveworks/appmesh-docs-fix
App Mesh docs fixes
2019-12-06 00:13:34 +07:00
stefanprodan
09b5295c85 Fix App Mesh gateway namespace 2019-12-05 23:39:13 +07:00
stefanprodan
9e423a6f71 Fix metrics-server install for EKS 2019-12-05 23:36:58 +07:00
Stefan Prodan
0ef05edf1e Merge pull request #390 from weaveworks/e2e-kube-1.16
Update e2e tests to Kubernetes v1.16
2019-12-05 18:06:39 +07:00
stefanprodan
a59901aaa9 Update e2e tests to Kubernetes 1.16 2019-12-04 15:35:36 +07:00
Stefan Prodan
53be3e07d2 Merge pull request #389 from weaveworks/release-0.20.4
Release 0.20.4
2019-12-03 14:56:40 +07:00
stefanprodan
2eb2ae52cd Release v0.20.4 2019-12-03 14:31:07 +07:00
stefanprodan
7bcc76eca0 Update Grafana to 6.5.1 2019-12-03 14:30:03 +07:00
Yusuke Kuoka
0d531e7bd1 Fix loadtester config in the envoy doc 2019-12-01 23:29:21 +09:00
Yusuke Kuoka
08851f83c7 Make envoy + crossover installation a bit more understandable 2019-12-01 23:25:29 +09:00
Stefan Prodan
295f5d7b39 Merge pull request #384 from weaveworks/svc-init
Add initialization phase to Kubernetes router
2019-12-01 10:08:18 +07:00
Yusuke Kuoka
a828524957 Add the guide for using Envoy and Crossover for Deployment targets
Ref #385
2019-11-30 13:03:01 +09:00
Yusuke Kuoka
6661406b75 Metrics provider for deployments and services behind Envoy
Assumes `envoy:smi` as the mesh provider name, as I've successfully tested progressive delivery for Envoy + Crossover with it.

This enhances Flagger to translate it to the metrics provider name of `envoy` for deployment targets, or `envoy:service` for service targets.

The `envoy` metrics provider is equivalent to `appmesh`, as both rely on the same set of standard metrics exposed by Envoy itself.

The `envoy:service` provider is almost the same as `envoy`, but drops the condition on pod name, as we only need to filter on the backing service name (`envoy_cluster_name`). For now, we don't consider other Envoy xDS implementations that use anything other than the original service names as `envoy_cluster_name`.

Ref #385
2019-11-30 13:03:01 +09:00
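A sketch of the provider naming this commit assumes; per the message above, Flagger translates it internally:

```yaml
spec:
  # Translated to the `envoy` metrics provider for Deployment targets,
  # or to `envoy:service` for Service targets.
  provider: envoy:smi
```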
stefanprodan
8766523279 Add initialization phase to Kubernetes router
Create Kubernetes services before deployments because Envoy's readiness depends on existing ClusterIPs
2019-11-27 22:15:04 +02:00
Stefan Prodan
b02a6da614 Merge pull request #383 from weaveworks/e2e-ups
Update nginx-ingress to 1.26.0
2019-11-27 18:51:27 +02:00
stefanprodan
89d7cb1b04 Update nginx-ingress to 1.26.0 2019-11-27 17:48:37 +02:00
Stefan Prodan
59d18de753 Merge pull request #372 from mumoshu/svc-support
feat: Canary-release anything behind K8s service
2019-11-27 16:44:56 +02:00
Yusuke Kuoka
e1d8703a15 Refactor to merge KubernetesServiceRouter into ServiceController
The current design is that everything related to managing the targeted resource should go into the respective implementation of `canary.Controller`. In the service-canary use-case our target is a Service, so rather than splitting and scattering the logic across Controller and Router, everything should naturally go into `ServiceController`. Maybe at the time of writing the first implementation, I was confusing the target service with the router.
2019-11-27 22:40:40 +09:00
Yusuke Kuoka
1ba595bc6f feat: Canary-release anything behind K8s service
Resolves #371

---

This adds the support for `corev1.Service` as the `targetRef.kind`, so that we can use Flagger just for canary analysis and traffic-shifting on existing and pre-created services. Flagger doesn't touch deployments and HPAs in this mode.

This is useful for keeping full control over the resources backing the service to be canary-released, including pods (behind a ClusterIP service) and external services (behind an ExternalName service).

The major use-cases in my mind are:

- Canary-release a K8s cluster. You create two clusters and a master cluster. In the master cluster, you create two `ExternalName` services pointing to (the hostname of the load balancer of the targeted app instance in) each cluster. Flagger runs on the master cluster and helps safely roll out a new K8s cluster by doing a canary release on the `ExternalName` service.
- You want annotations and labels added to the service for integrating with things like external load balancers (without extending Flagger to support customizing every aspect of the K8s service it manages).

**Design**:

A canary release on a K8s service is almost the same as one on a K8s deployment. The only fundamental difference is that it operates only on a set of K8s services.

For example, one may start by creating two Helm releases for `podinfo-blue` and `podinfo-green`, and a K8s service `podinfo`. The `podinfo` service should initially have the same `Spec` as that of `podinfo-blue`.

On a new release, you update `podinfo-green`, then trigger Flagger by updating the K8s service `podinfo` so that it points to pods or `externalName` as declared in `podinfo-green`. Flagger does the rest. The end result is that the traffic to `podinfo` is gradually and safely shifted from `podinfo-blue` to `podinfo-green`.

**How it works**:

Under the hood, Flagger maintains two K8s services, `podinfo-primary` and `podinfo-canary`. Compared to canaries on K8s deployments, it doesn't create the service named `podinfo`, as it is already provided by YOU.

Once Flagger detects the change in the `podinfo` service, it updates the `podinfo-canary` service and the routes, then analyzes the canary. On successful analysis, it promotes the canary service to the `podinfo-primary` service. You expose the `podinfo` service via any L7 ingress solution or a service mesh so that the traffic is managed by Flagger for safe deployments.

**Giving it a try**:

To give it a try, create a `Canary` as usual, but with its `targetRef` pointing to a K8s service:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
spec:
  provider: kubernetes
  targetRef:
    apiVersion: core/v1
    kind: Service
    name: podinfo
  service:
    port: 9898
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed checks before rollback
    threshold: 2
    # number of checks to run before rollback
    iterations: 2
    # Prometheus checks based on
    # http_request_duration_seconds histogram
    metrics: []
```

Create a K8s service named `podinfo`, and update it. Now watch for the services `podinfo`, `podinfo-primary`, `podinfo-canary`.

Flagger tracks the `podinfo` service for changes. Upon any change, it reconciles the `podinfo-primary` and `podinfo-canary` services. `podinfo-canary` always replicates the latest `podinfo`. In contrast, `podinfo-primary` replicates the latest successful `podinfo-canary`.

**Notes**:

- For the canary cluster use-case, we would need to write a K8s operator that, e.g. for App Mesh, syncs `ExternalName` services to App Mesh `VirtualNode`s. But that's another story!
2019-11-27 09:07:29 +09:00
Stefan Prodan
446a2b976c Merge pull request #380 from weaveworks/skip-primary-check
Skip primary check on skip analysis
2019-11-26 14:25:57 +02:00
stefanprodan
9af6ade54d Skip primary check on skip analysis 2019-11-25 23:48:22 +02:00
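For reference, the Canary field this fix concerns; a minimal sketch, assuming the `skipAnalysis` field name from Flagger's CRD:

```yaml
spec:
  # With analysis skipped, the primary readiness check is now skipped too.
  skipAnalysis: true
```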
Stefan Prodan
3fbe62aa47 Merge pull request #378 from weaveworks/refac-deployer
Refactor canary package
2019-11-25 21:03:16 +02:00
stefanprodan
4454c9b5b5 Add canary factory for Kubernetes targets
- extract Kubernetes operations to controller interface
- implement controller interface for kind Deployment
2019-11-25 18:45:19 +02:00
Stefan Prodan
c2cf9bf4b1 Merge pull request #373 from sfxworks/deployment-fix
Upgrade deployment spec to apps v1
2019-11-23 16:55:14 +00:00
Samuel Walker
3afc7978bd upgrade deployment spec to apps v1 2019-11-18 11:10:15 -05:00
stefanprodan
7a0ba8b477 Update v0.20.3 changelog 2019-11-13 14:06:14 +02:00
Stefan Prodan
0eb21a98a5 Merge pull request #368 from weaveworks/wrk
Add wrk to load tester tools
2019-11-13 13:59:28 +02:00
stefanprodan
2876092912 Update flagger-appmesh-gateway to 1.1.0 2019-11-13 13:07:59 +02:00
stefanprodan
3dbfa34a53 Add wrk to load tester tools
- add wrk v4.0.2
- update Helm v2 to 2.16.1
- update Helm v3 to 3.0.0-rc.3
2019-11-13 12:54:47 +02:00
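A sketch of driving the newly added wrk binary from a load-test webhook, assuming the loadtester's `cmd` metadata convention; the target URL and duration are illustrative:

```yaml
webhooks:
  - name: load-test
    url: http://flagger-loadtester.test/
    metadata:
      cmd: "wrk -d 60s -c 2 -t 2 http://podinfo-canary.test:9898/"
```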
Stefan Prodan
656f81787c Merge pull request #367 from andrew-demb/patch-1
Fixed readiness/liveness probe example in docs
2019-11-13 12:10:19 +02:00
Andrii Dembitskyi
920d558fde Fixed readiness/liveness probe example in docs 2019-11-13 09:24:12 +02:00
stefanprodan
638a9f1c93 Fix App Mesh gateway deployment 2019-11-12 13:18:45 +02:00
stefanprodan
f1c3ee7a82 Release v0.20.3 2019-11-11 19:14:05 +02:00
Stefan Prodan
878f106573 Merge pull request #365 from weaveworks/appmesh-gateway-chart
Add App Mesh gateway chart
2019-11-08 21:40:21 +02:00
stefanprodan
945eded6bf Add the App Mesh Gateway to docs 2019-11-08 21:02:51 +02:00
stefanprodan
f94f9c23d6 Patch cluster role bindings in kustomization 2019-11-08 12:40:14 +02:00
stefanprodan
527b73e8ef Use App Mesh Prometheus in kustomization 2019-11-08 12:39:45 +02:00
stefanprodan
d4555c5919 Use weaveworks logo in Helm charts 2019-11-08 12:38:47 +02:00
stefanprodan
560bb93e3d Add App Mesh gateway Helm chart 2019-11-08 12:38:06 +02:00
157 changed files with 8479 additions and 1103 deletions

View File

@@ -88,10 +88,21 @@ jobs:
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-kind.sh v1.17.0
- run: test/e2e-kubernetes.sh
- run: test/e2e-kubernetes-tests.sh
e2e-kubernetes-svc-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-kubernetes.sh
- run: test/e2e-kubernetes-svc-tests.sh
e2e-smi-istio-testing:
machine: true
steps:
@@ -139,6 +150,17 @@ jobs:
- run: test/e2e-linkerd.sh
- run: test/e2e-linkerd-tests.sh
e2e-contour-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-contour.sh
- run: test/e2e-contour-tests.sh
push-helm-charts:
docker:
- image: circleci/golang:1.13
@@ -204,6 +226,9 @@ workflows:
- e2e-linkerd-testing:
requires:
- build-binary
- e2e-contour-testing:
requires:
- build-binary
- push-container:
requires:
- build-binary

View File

@@ -2,6 +2,67 @@
All notable changes to this project are documented in this file.
## 0.23.0 (2020-02-06)
Adds support for service name configuration and rollback webhook
#### Features
- Implement service name override [#416](https://github.com/weaveworks/flagger/pull/416)
- Add support for gated rollback [#420](https://github.com/weaveworks/flagger/pull/420)
## 0.22.0 (2020-01-16)
Adds event dispatching through webhooks
#### Features
- Implement event dispatching webhook [#409](https://github.com/weaveworks/flagger/pull/409)
- Add general purpose event webhook [#401](https://github.com/weaveworks/flagger/pull/401)
#### Improvements
- Update Contour to v1.1 and add Linkerd header [#411](https://github.com/weaveworks/flagger/pull/411)
- Update Istio e2e to v1.4.3 [#407](https://github.com/weaveworks/flagger/pull/407)
- Update Kubernetes packages to 1.17 [#406](https://github.com/weaveworks/flagger/pull/406)
## 0.21.0 (2020-01-06)
Adds support for Contour ingress controller
#### Features
- Add support for Contour ingress controller [#397](https://github.com/weaveworks/flagger/pull/397)
- Add support for Envoy managed by Crossover via SMI [#386](https://github.com/weaveworks/flagger/pull/386)
- Extend canary target ref to Kubernetes Service kind [#372](https://github.com/weaveworks/flagger/pull/372)
#### Improvements
- Add Prometheus operator PodMonitor template to Helm chart [#399](https://github.com/weaveworks/flagger/pull/399)
- Update e2e tests to Kubernetes v1.16 [#390](https://github.com/weaveworks/flagger/pull/390)
## 0.20.4 (2019-12-03)
Adds support for taking over a running deployment without disruption
#### Improvements
- Add initialization phase to Kubernetes router [#384](https://github.com/weaveworks/flagger/pull/384)
- Add canary controller interface and Kubernetes deployment kind implementation [#378](https://github.com/weaveworks/flagger/pull/378)
#### Fixes
- Skip primary check on skip analysis [#380](https://github.com/weaveworks/flagger/pull/380)
## 0.20.3 (2019-11-13)
Adds wrk to load tester tools and the App Mesh gateway chart to Flagger Helm repository
#### Improvements
- Add wrk to load tester tools [#368](https://github.com/weaveworks/flagger/pull/368)
- Add App Mesh gateway chart [#365](https://github.com/weaveworks/flagger/pull/365)
## 0.20.2 (2019-11-07)
Adds support for exposing canaries outside the cluster using App Mesh Gateway annotations

View File

@@ -1,44 +1,64 @@
FROM bats/bats:v1.1.0
FROM alpine:3.10.3 as build
RUN addgroup -S app \
&& adduser -S -g app app \
&& apk --no-cache add ca-certificates curl jq
WORKDIR /home/app
RUN apk --no-cache add alpine-sdk perl curl
RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
chmod +x hey && mv hey /usr/local/bin/hey
# verify hey works
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
RUN curl -sSL "https://get.helm.sh/helm-v2.15.1-linux-amd64.tar.gz" | tar xvz && \
RUN HELM2_VERSION=2.16.1 && \
curl -sSL "https://get.helm.sh/helm-v${HELM2_VERSION}-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
rm -rf linux-amd64
chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller
RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-rc.2-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
rm -rf linux-amd64
RUN HELM3_VERSION=3.0.1 && \
curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3
RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
chmod +x /usr/local/bin/grpc_health_probe
RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web
RUN GHZ_VERSION=0.39.0 && \
curl -sSL "https://github.com/bojand/ghz/releases/download/v${GHZ_VERSION}/ghz_${GHZ_VERSION}_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz
RUN HELM_TILLER_VERSION=0.9.3 && \
curl -sSL "https://github.com/rimusz/helm-tiller/archive/v${HELM_TILLER_VERSION}.tar.gz" | tar xz -C /tmp && \
mv /tmp/helm-tiller-${HELM_TILLER_VERSION} /tmp/helm-tiller
RUN WRK_VERSION=4.0.2 && \
cd /tmp && git clone -b ${WRK_VERSION} https://github.com/wg/wrk
RUN cd /tmp/wrk && make
FROM bats/bats:v1.1.0
RUN addgroup -S app && \
adduser -S -g app app && \
apk --no-cache add ca-certificates curl jq libgcc
WORKDIR /home/app
COPY --from=build /usr/local/bin/hey /usr/local/bin/
COPY --from=build /tmp/wrk/wrk /usr/local/bin/
COPY --from=build /usr/local/bin/helm /usr/local/bin/
COPY --from=build /usr/local/bin/tiller /usr/local/bin/
COPY --from=build /usr/local/bin/ghz /usr/local/bin/
COPY --from=build /usr/local/bin/helmv3 /usr/local/bin/
COPY --from=build /usr/local/bin/grpc_health_probe /usr/local/bin/
COPY --from=build /tmp/helm-tiller /tmp/helm-tiller
ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto
RUN ls /tmp
COPY ./bin/loadtester .
RUN chown -R app:app ./
USER app
RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.9.3.tar.gz" | tar xvz && \
helm init --client-only && helm plugin install helm-tiller-0.9.3 && helm plugin list
# test load generator tools
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
RUN wrk -d 1s -c 1 -t 1 https://flagger.app > /dev/null && echo $? | grep 0
# install Helm v2 plugins
RUN helm init --client-only && helm plugin install /tmp/helm-tiller
ENTRYPOINT ["./loadtester"]

View File

@@ -7,7 +7,7 @@
[![release](https://img.shields.io/github/release/weaveworks/flagger/all.svg)](https://github.com/weaveworks/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
using Istio, Linkerd, App Mesh, NGINX, Contour or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
load tests or any other custom validation.
@@ -43,6 +43,8 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
* [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
* [Contour Canary Deployments](https://docs.flagger.app/usage/contour-progressive-delivery)
* [Crossover canary deployments](https://docs.flagger.app/usage/crossover-progressive-delivery)
* [Blue/Green deployments](https://docs.flagger.app/usage/blue-green)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
@@ -68,7 +70,7 @@ metadata:
namespace: test
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, contour, gloo, supergloo
provider: istio
# deployment reference
targetRef:
@@ -84,6 +86,8 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# service name (optional)
name: podinfo
# ClusterIP port number
port: 9898
# container port name or number (optional)
@@ -149,21 +153,21 @@ For more details on how the canary analysis and promotion works please [read the
## Features
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo | Kubernetes CNI |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo | Contour | CNI |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
## Roadmap
* Integrate with other ingress controllers like Contour, HAProxy, ALB
* Integrate with other service mesh like Consul Connect and ingress controllers like HAProxy, ALB
* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two
## Contributing
@@ -174,9 +178,9 @@ When submitting bug reports please include as much details as possible:
* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)
* which Kubernetes version
* what configuration (canary, ingress and workloads definitions)
* what happened (Flagger and Proxy logs)
## Getting Help
@@ -189,4 +193,4 @@ If you have any questions about Flagger and progressive delivery:
hands-on training and meetups in your area.
* File an [issue](https://github.com/weaveworks/flagger/issues/new).
Your feedback is always welcome!
Your feedback is always welcome!

View File

@@ -81,6 +81,11 @@ rules:
- virtualservices
- gateways
verbs: ["*"]
- apiGroups:
- projectcontour.io
resources:
- httpproxies
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:

View File

@@ -115,6 +115,9 @@ spec:
type: object
required: ["port"]
properties:
name:
description: Kubernetes service name
type: string
port:
description: Container port number
type: number
@@ -245,6 +248,8 @@ spec:
- rollout
- confirm-promotion
- post-rollout
- event
- rollback
url:
description: URL address of this webhook
type: string

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.20.2
image: weaveworks/flagger:0.23.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.11.0
image: weaveworks/flagger-loadtester:0.12.1
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,19 @@
apiVersion: v1
name: appmesh-gateway
description: Flagger Gateway for AWS App Mesh is an edge L7 load balancer that exposes applications outside the mesh.
version: 1.1.1
appVersion: 1.1.0
home: https://flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/stefanprodan/appmesh-gateway
maintainers:
- name: Stefan Prodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- flagger
- appmesh
- envoy
- gateway
- ingress

View File

@@ -0,0 +1,87 @@
# Flagger Gateway for App Mesh
[Flagger Gateway for App Mesh](https://github.com/stefanprodan/appmesh-gateway) is an
Envoy-powered load balancer that exposes applications outside the mesh.
The gateway facilitates canary deployments and A/B testing for user-facing web applications running on AWS App Mesh.
## Prerequisites
* Kubernetes >= 1.13
* [App Mesh controller](https://github.com/aws/eks-charts/tree/master/stable/appmesh-controller) >= 0.2.0
* [App Mesh inject](https://github.com/aws/eks-charts/tree/master/stable/appmesh-inject) >= 0.2.0
## Installing the Chart
Add Flagger Helm repository:
```console
$ helm repo add flagger https://flagger.app
```
Create a namespace with App Mesh sidecar injection enabled:
```sh
kubectl create ns flagger-system
kubectl label namespace test appmesh.k8s.aws/sidecarInjectorWebhook=enabled
```
Install App Mesh Gateway for an existing mesh:
```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
--namespace flagger-system \
--set mesh.name=global
```
Optionally you can create a mesh at install time:
```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
--namespace flagger-system \
--set mesh.name=global \
--set mesh.create=true
```
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `appmesh-gateway` deployment:
```console
helm delete --purge appmesh-gateway
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following tables lists the configurable parameters of the chart and their default values.
Parameter | Description | Default
--- | --- | ---
`service.type` | When set to LoadBalancer it creates an AWS NLB | `LoadBalancer`
`proxy.access_log_path` | to enable the access logs, set it to `/dev/stdout` | `/dev/null`
`proxy.image.repository` | image repository | `envoyproxy/envoy`
`proxy.image.tag` | image tag | `<VERSION>`
`proxy.image.pullPolicy` | image pull policy | `IfNotPresent`
`controller.image.repository` | image repository | `weaveworks/flagger-appmesh-gateway`
`controller.image.tag` | image tag | `<VERSION>`
`controller.image.pullPolicy` | image pull policy | `IfNotPresent`
`resources.requests/cpu` | pod CPU request | `100m`
`resources.requests/memory` | pod memory request | `128Mi`
`resources.limits/memory` | pod memory limit | `2Gi`
`nodeSelector` | node labels for pod assignment | `{}`
`tolerations` | list of node taints to tolerate | `[]`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`serviceAccount.create` | If `true`, create a new service account | `true`
`serviceAccount.name` | Service account to be used | None
`mesh.create` | If `true`, create mesh custom resource | `false`
`mesh.name` | The name of the mesh to use | `global`
`mesh.discovery` | The service discovery type to use, can be dns or cloudmap | `dns`
`hpa.enabled` | `true` if HPA resource should be created, metrics-server is required | `true`
`hpa.maxReplicas` | number of max replicas | `3`
`hpa.cpu` | average total CPU usage per pod (1-100) | `99`
`hpa.memory` | average memory usage per pod (100Mi-1Gi) | None
`discovery.optIn` | `true` if only services with the 'expose' annotation are discoverable | `true`

View File

@@ -0,0 +1 @@
App Mesh Gateway installed!

View File

@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "appmesh-gateway.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "appmesh-gateway.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "appmesh-gateway.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "appmesh-gateway.labels" -}}
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
helm.sh/chart: {{ include "appmesh-gateway.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "appmesh-gateway.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "appmesh-gateway.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,8 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "appmesh-gateway.serviceAccountName" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
{{- end }}

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
data:
envoy.yaml: |-
admin:
access_log_path: {{ .Values.proxy.access_log_path }}
address:
socket_address:
address: 0.0.0.0
port_value: 8081
dynamic_resources:
ads_config:
api_type: GRPC
grpc_services:
- envoy_grpc:
cluster_name: xds
cds_config:
ads: {}
lds_config:
ads: {}
static_resources:
clusters:
- name: xds
connect_timeout: 0.50s
type: static
http2_protocol_options: {}
load_assignment:
cluster_name: xds
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 18000

View File

@@ -0,0 +1,144 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: appmesh
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
prometheus.io/port: "8081"
# exclude inbound traffic on port 8080
appmesh.k8s.aws/ports: "444"
# exclude egress traffic to xDS server and Kubernetes API
appmesh.k8s.aws/egressIgnoredPorts: "18000,22,443"
checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum | quote }}
spec:
serviceAccountName: {{ include "appmesh-gateway.serviceAccountName" . }}
terminationGracePeriodSeconds: 45
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
topologyKey: kubernetes.io/hostname
weight: 100
volumes:
- name: appmesh-gateway-config
configMap:
name: {{ template "appmesh-gateway.fullname" . }}
containers:
- name: controller
image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}"
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
command:
- ./flagger-appmesh-gateway
- --opt-in={{ .Values.discovery.optIn }}
- --gateway-mesh={{ .Values.mesh.name }}
- --gateway-name=$(POD_SERVICE_ACCOUNT)
- --gateway-namespace=$(POD_NAMESPACE)
env:
- name: POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: grpc
containerPort: 18000
protocol: TCP
livenessProbe:
initialDelaySeconds: 5
tcpSocket:
port: grpc
readinessProbe:
initialDelaySeconds: 5
tcpSocket:
port: grpc
resources:
limits:
memory: 1Gi
requests:
cpu: 10m
memory: 32Mi
- name: proxy
image: "{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
imagePullPolicy: {{ .Values.proxy.image.pullPolicy }}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- -c
- /config/envoy.yaml
- --service-cluster $(POD_NAMESPACE)
- --service-node $(POD_NAME)
- --log-level info
- --base-id 1234
ports:
- name: admin
containerPort: 8081
protocol: TCP
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
initialDelaySeconds: 5
tcpSocket:
port: admin
readinessProbe:
initialDelaySeconds: 5
httpGet:
path: /ready
port: admin
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: appmesh-gateway-config
mountPath: /config
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.hpa.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "appmesh-gateway.fullname" . }}
minReplicas: {{ .Values.replicaCount }}
maxReplicas: {{ .Values.hpa.maxReplicas }}
metrics:
{{- if .Values.hpa.cpu }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.hpa.cpu }}
{{- end }}
{{- if .Values.hpa.memory }}
- type: Resource
resource:
name: memory
targetAverageValue: {{ .Values.hpa.memory }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.mesh.create }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
name: {{ .Values.mesh.name }}
annotations:
helm.sh/resource-policy: keep
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
serviceDiscoveryType: {{ .Values.mesh.discovery }}
{{- end }}

View File

@@ -0,0 +1,57 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
privileged: false
hostIPC: false
hostNetwork: false
hostPID: false
readOnlyRootFilesystem: false
allowPrivilegeEscalation: false
allowedCapabilities:
- '*'
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- '*'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "appmesh-gateway.fullname" . }}-psp
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "appmesh-gateway.fullname" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "appmesh-gateway.fullname" . }}-psp
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "appmesh-gateway.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "appmesh-gateway.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,39 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
rules:
- apiGroups:
- ""
resources:
- services
verbs: ["*"]
- apiGroups:
- appmesh.k8s.aws
resources:
- meshes
- meshes/status
- virtualnodes
- virtualnodes/status
- virtualservices
- virtualservices/status
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "appmesh-gateway.fullname" . }}
subjects:
- name: {{ template "appmesh-gateway.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
kind: ServiceAccount
{{- end }}

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "appmesh-gateway.fullname" . }}
annotations:
gateway.appmesh.k8s.aws/expose: "false"
{{- if eq .Values.service.type "LoadBalancer" }}
service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
{{- end }}
labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
type: {{ .Values.service.type }}
{{- if eq .Values.service.type "LoadBalancer" }}
externalTrafficPolicy: Local
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -0,0 +1,69 @@
# Default values for appmesh-gateway.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
discovery:
# discovery.optIn `true` if only services with the 'expose' annotation are discoverable
optIn: true
proxy:
access_log_path: /dev/null
image:
repository: docker.io/envoyproxy/envoy
tag: v1.12.0
pullPolicy: IfNotPresent
controller:
image:
repository: weaveworks/flagger-appmesh-gateway
tag: v1.1.0
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service:
# service.type: When set to LoadBalancer it creates an AWS NLB
type: LoadBalancer
port: 80
hpa:
# hpa.enabled `true` if HPA resource should be created, metrics-server is required
enabled: true
maxReplicas: 3
# hpa.cpu average total CPU usage per pod (1-100)
cpu: 99
# hpa.memory average memory usage per pod (100Mi-1Gi)
memory:
resources:
limits:
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations: []
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
# rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
pspEnabled: false
mesh:
# mesh.create: `true` if mesh resource should be created
create: false
# mesh.name: The name of the mesh to use
name: "global"
# mesh.discovery: The service discovery type to use, can be dns or cloudmap
discovery: dns

View File

@@ -1,21 +1,23 @@
apiVersion: v1
name: flagger
version: 0.20.2
appVersion: 0.20.2
version: 0.23.0
appVersion: 0.23.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
description: Flagger is a progressive delivery operator for Kubernetes
home: https://flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- appmesh
- linkerd
- gitops
- flagger
- istio
- appmesh
- linkerd
- gloo
- gitops
- canary

View File

@@ -1,7 +1,8 @@
# Flagger
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of canary
deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack or MS Teams.
@@ -45,6 +46,16 @@ $ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://linkerd-prometheus:9090
```
To install the chart with the release name `flagger` for AWS App Mesh:
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://appmesh-prometheus:9090
```
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
@@ -72,7 +83,12 @@ Parameter | Description | Default
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`eventWebhook` | If set, Flagger will publish events to the given webhook | None
`msteams.url` | Microsoft Teams incoming webhook | None
`podMonitor.enabled` | if `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
`podMonitor.namespace` | the namespace where the PodMonitor is created | the same namespace
`podMonitor.interval` | interval at which metrics should be scraped | `15s`
`podMonitor.podMonitor` | additional labels to add to the PodMonitor | `{}`
`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
`leaderElection.replicaCount` | number of replicas | `1`
`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
@@ -91,7 +107,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--namespace flagger-system \
--set crd.create=false \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general

View File

@@ -116,6 +116,9 @@ spec:
type: object
required: ['port']
properties:
name:
description: Kubernetes service name
type: string
port:
description: Container port number
type: number
@@ -246,6 +249,8 @@ spec:
- rollout
- confirm-promotion
- post-rollout
- event
- rollback
url:
description: URL address of this webhook
type: string

View File

@@ -82,6 +82,9 @@ spec:
{{- if .Values.ingressAnnotationsPrefix }}
- -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
{{- end }}
{{- if .Values.eventWebhook }}
- -event-webhook={{ .Values.eventWebhook }}
{{- end }}
livenessProbe:
exec:
command:

View File

@@ -0,0 +1,27 @@
{{- if .Values.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- range $k, $v := .Values.podMonitor.additionalLabels }}
{{ $k }}: {{ $v | quote }}
{{- end }}
name: {{ include "flagger.fullname" . }}
namespace: {{ .Values.podMonitor.namespace | default .Release.Namespace }}
spec:
podMetricsEndpoints:
- interval: {{ .Values.podMonitor.interval }}
path: /metrics
port: http
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -230,7 +230,7 @@ spec:
serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
containers:
- name: prometheus
image: "docker.io/prom/prometheus:v2.12.0"
image: "docker.io/prom/prometheus:v2.15.2"
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=2h'

View File

@@ -77,6 +77,11 @@ rules:
- virtualservices
- gateways
verbs: ["*"]
- apiGroups:
- projectcontour.io
resources:
- httpproxies
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:

View File

@@ -2,13 +2,14 @@
image:
repository: weaveworks/flagger
tag: 0.20.2
tag: 0.23.0
pullPolicy: IfNotPresent
pullSecret:
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
metricsServer: "http://prometheus:9090"
@@ -28,10 +29,19 @@ slack:
# incoming webhook https://api.slack.com/incoming-webhooks
url:
# when specified, flagger will publish events to the provided webhook
eventWebhook: ""
msteams:
# MS Teams incoming webhook URL
url:
podMonitor:
enabled: false
namespace:
interval: 15s
additionalLabels: {}
#env:
#- name: SLACK_URL
# valueFrom:

View File

@@ -1,13 +1,20 @@
apiVersion: v1
name: grafana
version: 1.3.0
appVersion: 6.2.5
version: 1.4.0
appVersion: 6.5.1
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
home: https://flagger.app
sources:
- https://github.com/weaveworks/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- flagger
- grafana
- canary
- istio
- appmesh

View File

@@ -1,13 +1,12 @@
# Flagger Grafana
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
Grafana dashboards for monitoring progressive deployments powered by Flagger and Prometheus.
![flagger-grafana](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
@@ -18,14 +17,20 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-grafana`:
To install the chart for Istio run:
```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
--set url=http://prometheus:9090
```
To install the chart for AWS App Mesh run:
```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://appmesh-prometheus:9090
```
The command deploys Grafana on the Kubernetes cluster in the default namespace.
@@ -56,10 +61,7 @@ Parameter | Description | Default
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `admin`
`url` | Prometheus URL | `http://prometheus:9090`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}

View File

@@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: grafana/grafana
tag: 6.2.5
tag: 6.5.1
pullPolicy: IfNotPresent
podAnnotations: {}

View File

@@ -1,12 +1,12 @@
apiVersion: v1
name: loadtester
version: 0.11.0
appVersion: 0.11.0
version: 0.12.1
appVersion: 0.12.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
maintainers:
@@ -14,8 +14,10 @@ maintainers:
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- flagger
- istio
- appmesh
- linkerd
- gloo
- gitops
- load testing

View File

@@ -2,7 +2,7 @@ replicaCount: 1
image:
repository: weaveworks/flagger-loadtester
tag: 0.11.0
tag: 0.12.1
pullPolicy: IfNotPresent
podAnnotations:

View File

@@ -3,10 +3,12 @@ version: 3.1.0
appVersion: 3.1.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://flagger.app
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
description: Flagger canary deployment demo application
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/stefanprodan/podinfo
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com

View File

@@ -9,17 +9,7 @@ import (
"strings"
"time"
"github.com/Masterminds/semver"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
"github.com/Masterminds/semver/v3"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
@@ -30,6 +20,18 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/transport"
_ "k8s.io/code-generator/cmd/client-gen/generators"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
)
var (
@@ -43,6 +45,7 @@ var (
slackURL string
slackUser string
slackChannel string
eventWebhook string
threadiness int
zapReplaceGlobals bool
zapEncoding string
@@ -65,6 +68,7 @@ func init() {
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events")
flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
@@ -159,7 +163,7 @@ func main() {
logger.Infof("Watching namespace %s", namespace)
}
observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
observerFactory, err := metrics.NewFactory(metricsServer, 5*time.Second)
if err != nil {
logger.Fatalf("Error building prometheus client: %s", err.Error())
}
@@ -178,6 +182,12 @@ func main() {
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
configTracker := canary.ConfigTracker{
Logger: logger,
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)
c := controller.NewController(
kubeClient,
@@ -187,11 +197,12 @@ func main() {
controlLoopInterval,
logger,
notifierClient,
canaryFactory,
routerFactory,
observerFactory,
meshProvider,
version.VERSION,
labels,
eventWebhook,
)
flaggerInformerFactory.Start(stopCh)

View File

@@ -10,7 +10,7 @@ import (
"time"
)
var VERSION = "0.11.0"
var VERSION = "0.12.1"
var (
logLevel string
port string

Binary file not shown.

After

Size: 40 KiB

Binary file not shown.

After

Size: 37 KiB

View File

@@ -5,7 +5,7 @@ description: Flagger is a progressive delivery Kubernetes operator
# Introduction
[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX** or **Gloo** routing for traffic shifting and **Prometheus** metrics for canary analysis.
deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX**, **Contour** or **Gloo** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running system integration/acceptance tests, load tests, or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
@@ -16,7 +16,7 @@ Based on analysis of the **KPIs** a canary is promoted or aborted, and the analy
Flagger can be configured with Kubernetes custom resources and is compatible with
any CI/CD solution made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
it can be used in **GitOps** pipelines together with Flux CD or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)

View File

@@ -19,6 +19,8 @@
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Gloo Canary Deployments](usage/gloo-progressive-delivery.md)
* [Contour Canary Deployments](usage/contour-progressive-delivery.md)
* [Crossover Canary Deployments](usage/crossover-progressive-delivery.md)
* [Blue/Green Deployments](usage/blue-green.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

View File

@@ -6,13 +6,13 @@
Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
* Canary (progressive traffic shifting)
* Istio, Linkerd, App Mesh, NGINX, Gloo
* Istio, Linkerd, App Mesh, NGINX, Contour, Gloo
* Canary (traffic mirroring)
* Istio
* A/B Testing (HTTP headers and cookies traffic routing)
* Istio, App Mesh, NGINX
* Istio, App Mesh, NGINX, Contour
* Blue/Green (traffic switch)
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Gloo
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo
For Canary deployments and A/B testing you'll need a Layer 7 traffic management solution like a service mesh or an ingress controller.
For Blue/Green deployments no service mesh or ingress controller is required.
@@ -58,6 +58,21 @@ App Mesh example:
Note that App Mesh supports a single condition.
Contour example:
```yaml
canaryAnalysis:
interval: 1m
threshold: 10
iterations: 2
match:
- headers:
user-agent:
prefix: "Chrome"
```
Note that Contour does not support regex; you can use prefix, suffix or exact matching.
NGINX example:
```yaml
@@ -173,6 +188,8 @@ spec:
kind: Deployment
name: podinfo
service:
# service name (optional)
name: podinfo
# ClusterIP port number (required)
port: 9898
# container port name or number
@@ -181,19 +198,21 @@ spec:
portName: http
```
If the `service.name` is not specified, then `targetRef.name` is used for the apex domain and as the name prefix for the canary and primary services.
You should treat the service name as an immutable field; changing it could result in routing conflicts.
Based on the canary spec service, Flagger generates the following Kubernetes ClusterIP service:
* `<targetRef.name>.<namespace>.svc.cluster.local`
* `<service.name>.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<targetRef.name>-primary.<namespace>.svc.cluster.local`
* `<service.name>-primary.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<targetRef.name>-canary.<namespace>.svc.cluster.local`
* `<service.name>-canary.<namespace>.svc.cluster.local`
selector `app=<name>`
This ensures that traffic coming from a namespace outside the mesh to `podinfo.test:9898`
will be routed to the latest stable release of your app.
```yaml
apiVersion: v1
kind: Service

View File

@@ -32,6 +32,8 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# service name (optional)
name: podinfo
# ClusterIP port number
port: 9898
# ClusterIP port name can be http or grpc (default http)
@@ -93,8 +95,10 @@ spec:
app: podinfo
```
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag.
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention, you can specify your label with
the `-selector-labels=my-app-label` command flag in the Flagger deployment manifest under container args,
or by setting `--set selectorLabels=my-app-label` when installing Flagger with Helm.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Services.
The container port from the target deployment should match the `service.port` or `service.targetPort`.
@@ -386,7 +390,7 @@ sum(
)
```
Envoy query (App Mesh or Gloo):
Envoy query (App Mesh, Contour or Gloo):
```javascript
sum(
@@ -439,7 +443,7 @@ histogram_quantile(0.99,
)
```
Envoy query (App Mesh or Gloo):
Envoy query (App Mesh, Contour or Gloo):
```javascript
histogram_quantile(0.99,
@@ -534,19 +538,24 @@ Then it compares the query result value with the metric threshold value.
The canary analysis can be extended with webhooks. Flagger will call each webhook URL and
determine from the response status code (HTTP 2xx) if the canary is failing or not.
There are three types of hooks:
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
There are several types of hooks:
* **confirm-rollout** hooks are executed before scaling up the canary deployment and can be used for manual approval.
The rollout is paused until the hook returns a successful HTTP status code.
* Pre-rollout hooks are executed before routing traffic to canary.
* **pre-rollout** hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails, and if the number of failures reaches the
threshold the canary will be rolled back.
* Rollout hooks are executed during the analysis on each iteration before the metric checks.
* **rollout** hooks are executed during the analysis on each iteration before the metric checks.
If a rollout hook call fails, the canary advancement is paused and eventually rolled back.
* Confirm-promotion hooks are executed before the promotion step.
* **confirm-promotion** hooks are executed before the promotion step.
The canary promotion is paused until the hooks return HTTP 200.
While the promotion is paused, Flagger will continue to run the metrics checks and rollout hooks.
* Post-rollout hooks are executed after the canary has been promoted or rolled back.
* **post-rollout** hooks are executed after the canary has been promoted or rolled back.
If a post-rollout hook fails, the error is logged.
* **rollback** hooks are executed while a canary deployment is in either Progressing or Waiting status.
This provides the ability to roll back during analysis or while waiting for a confirmation. If a rollback hook
returns a successful HTTP status code, Flagger will stop the analysis and mark the canary release as failed.
* **event** hooks are executed every time Flagger emits a Kubernetes event. When configured,
every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST request.
Spec:
@@ -578,6 +587,12 @@ Spec:
timeout: 5s
metadata:
some: "message"
- name: "rollback gate"
type: rollback
url: http://flagger-loadtester.test/gate/halt
- name: "send to Slack"
type: event
url: http://event-receiver.notifications/slack
```
> **Note** that the sum of all rollout webhook timeouts should be lower than the analysis interval.
@@ -603,6 +618,24 @@ Response status codes:
On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
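Because the contract is just HTTP status codes, a manual gate can be implemented in a few lines of Go. The sketch below is illustrative and is not Flagger's bundled load tester; the `/gate/*` paths and the in-memory toggle are assumptions:
```go
// A minimal gate webhook sketch, assuming only the contract described
// above: Flagger calls the hook URL and treats an HTTP 2xx response as
// a pass. The /gate/* paths and the toggle are illustrative.
package main

import (
	"log"
	"net/http"
	"sync/atomic"
)

func main() {
	var approved int32 // 0 = gate closed, 1 = gate open

	// Flagger polls this URL: 200 lets the rollout proceed, any other
	// status pauses it (the body shows up in the failed checks log).
	http.HandleFunc("/gate/check", func(w http.ResponseWriter, r *http.Request) {
		if atomic.LoadInt32(&approved) == 1 {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "canary is awaiting manual approval", http.StatusForbidden)
	})

	// An operator flips the gate with plain POSTs (hypothetical endpoints).
	http.HandleFunc("/gate/open", func(w http.ResponseWriter, r *http.Request) {
		atomic.StoreInt32(&approved, 1)
	})
	http.HandleFunc("/gate/close", func(w http.ResponseWriter, r *http.Request) {
		atomic.StoreInt32(&approved, 0)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
The same server can back a `confirm-rollout`, `confirm-promotion` or `rollback` hook; only the meaning of the 200 response differs per hook type.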
Event payload (HTTP POST):
```json
{
"name": "string (canary name)",
"namespace": "string (canary namespace)",
"phase": "string (canary phase)",
"metadata": {
"eventMessage": "string (canary event message)",
"eventType": "string (canary event type)",
"timestamp": "string (unix timestamp ms)"
}
}
```
The event receiver can create alerts based on the received phase
(possible values: `Initialized`, `Waiting`, `Progressing`, `Promoting`, `Finalising`, `Succeeded` or `Failed`).
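Since events are delivered as plain JSON over HTTP POST, a receiver can be sketched in a few lines of Go. This is only an illustration of the schema above; the port and the alerting rule are arbitrary:
```go
// A minimal event webhook receiver sketch, assuming only the JSON schema
// documented above.
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// canaryEvent mirrors the documented event payload.
type canaryEvent struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
	Phase     string `json:"phase"`
	Metadata  struct {
		EventMessage string `json:"eventMessage"`
		EventType    string `json:"eventType"`
		Timestamp    string `json:"timestamp"` // unix timestamp in milliseconds
	} `json:"metadata"`
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var e canaryEvent
		if err := json.NewDecoder(r.Body).Decode(&e); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Alert on failed canaries and warnings; log the rest as info.
		if e.Phase == "Failed" || e.Metadata.EventType == "Warning" {
			log.Printf("ALERT %s.%s: %s", e.Name, e.Namespace, e.Metadata.EventMessage)
		} else {
			log.Printf("info %s.%s [%s]: %s", e.Name, e.Namespace, e.Phase, e.Metadata.EventMessage)
		}
		w.WriteHeader(http.StatusAccepted)
	})
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```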
### Load Testing
For workloads that are not receiving constant traffic, Flagger can be configured with a webhook,
@@ -803,6 +836,10 @@ For manual approval of a canary deployment you can use the `confirm-rollout` and
The confirmation rollout hooks are executed before the pre-rollout hooks.
Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.
For manual rollback of a canary deployment you can use the `rollback` webhook. The rollback hook will be called
during the analysis and confirmation states. If a rollback webhook returns a successful HTTP status code, Flagger
will shift all traffic back to the primary instance and fail the canary.
Manual gating with Flagger's tester:
```yaml
@@ -871,4 +908,14 @@ While the promotion is paused, Flagger will continue to run the metrics checks a
url: http://flagger-loadtester.test/gate/halt
```
The `rollback` hook type can be used to manually rollback the canary promotion.
```yaml
canaryAnalysis:
webhooks:
- name: "rollback"
type: rollback
url: http://flagger-loadtester.test/gate/halt
```
If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary promotion is waiting for approval.

View File

@@ -12,7 +12,7 @@ The App Mesh integration with EKS is made out of the following components:
* `virtualservice.appmesh.k8s.aws` defines the routing rules for a workload inside the mesh
* CRD controller - keeps the custom resources in sync with the App Mesh control plane
* Admission controller - injects the Envoy sidecar and assigns Kubernetes pods to App Mesh virtual nodes
* Metrics server - Prometheus instance that collects and stores Envoy's metrics
* Telemetry service - Prometheus instance that collects and stores Envoy's metrics
### Create a Kubernetes cluster
@@ -79,7 +79,8 @@ Install the Horizontal Pod Autoscaler (HPA) metrics provider:
```bash
helm upgrade -i metrics-server stable/metrics-server \
--namespace kube-system
--namespace kube-system \
--set args[0]=--kubelet-preferred-address-types=InternalIP
```
After a minute, the metrics API should report CPU and memory usage for pods.
@@ -100,7 +101,7 @@ kubectl create ns appmesh-system
Apply the App Mesh CRDs:
```sh
kubectl apply -f https://raw.githubusercontent.com/aws/eks-charts/master/stable/appmesh-controller/crds/crds.yaml
kubectl apply -k github.com/aws/eks-charts/stable/appmesh-controller//crds
```
Add the EKS repository to Helm:
@@ -113,10 +114,10 @@ Install the App Mesh CRD controller:
```sh
helm upgrade -i appmesh-controller eks/appmesh-controller \
--wait --namespace appmesh-system --version 0.2.0
--wait --namespace appmesh-system
```
Install the App Mesh admission controller:
Install the App Mesh admission controller and create a mesh called `global`:
```sh
helm upgrade -i appmesh-inject eks/appmesh-inject \

View File

@@ -43,6 +43,16 @@ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://linkerd-prometheus:9090
```
Deploy Flagger for App Mesh:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://appmesh-prometheus:9090
```
You can install Flagger in any namespace as long as it can talk to the Prometheus service on port 9090.
Enable **Slack** notifications:
@@ -159,7 +169,7 @@ Install Flagger for AWS App Mesh:
kubectl apply -k github.com/weaveworks/flagger//kustomize/appmesh
```
This deploys Flagger and Prometheus (configured to scrape the App Mesh Envoy sidecars) in the `appmesh-system` namespace.
This deploys Flagger and sets the metrics server URL to App Mesh's Prometheus instance.
Install Flagger for Linkerd:

View File

@@ -27,7 +27,7 @@ You application should expose a HTTP endpoint that Kubernetes can call to determ
your app transitioned to a broken state from which it can't recover and needs to be restarted.
```yaml
readinessProbe:
livenessProbe:
exec:
command:
- wget
@@ -49,7 +49,7 @@ You application should expose a HTTP endpoint that Kubernetes can call to determ
your app is ready to receive traffic.
```yaml
livenessProbe:
readinessProbe:
exec:
command:
- wget

View File

@@ -53,3 +53,52 @@ Besides Slack, you can use Alertmanager to trigger alerts when a canary deployme
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
### Event Webhook
Flagger can be configured to send event payloads to a specified webhook:
```bash
helm upgrade -i flagger flagger/flagger \
--set eventWebhook=https://example.com/flagger-canary-event-webhook
```
When configured, every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST
request. The JSON payload has the following schema:
```json
{
"name": "string (canary name)",
"namespace": "string (canary namespace)",
"phase": "string (canary phase)",
"metadata": {
"eventMessage": "string (canary event message)",
"eventType": "string (canary event type)",
"timestamp": "string (unix timestamp ms)"
}
}
```
Example:
```json
{
"name": "podinfo",
"namespace": "default",
"phase": "Progressing",
"metadata": {
"eventMessage": "New revision detected! Scaling up podinfo.default",
"eventType": "Normal",
"timestamp": "1578607635167"
}
}
```
The event webhook can be overridden at the canary level with:
```yaml
canaryAnalysis:
webhooks:
- name: "send to Slack"
type: event
url: http://event-receiver.notifications/slack
```
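As an illustration, the event receiver behind such a hook could be a small bridge that re-shapes Flagger events into Slack messages. The sketch below assumes the event schema above and a Slack incoming webhook accepting `{"text": "..."}` payloads; the listen port, path and URLs are placeholders:
```go
// A minimal event-to-Slack bridge sketch; the webhook URL, listen port
// and /slack path are placeholders, not part of Flagger.
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

type canaryEvent struct {
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	Phase     string            `json:"phase"`
	Metadata  map[string]string `json:"metadata"`
}

func main() {
	slackURL := "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK" // placeholder

	http.HandleFunc("/slack", func(w http.ResponseWriter, r *http.Request) {
		var e canaryEvent
		if err := json.NewDecoder(r.Body).Decode(&e); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Re-shape the Flagger event as a Slack message.
		msg, _ := json.Marshal(map[string]string{
			"text": e.Name + "." + e.Namespace + " is " + e.Phase + ": " + e.Metadata["eventMessage"],
		})
		resp, err := http.Post(slackURL, "application/json", bytes.NewReader(msg))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		defer resp.Body.Close()
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```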

View File

@@ -14,22 +14,33 @@ The only App Mesh object you need to create by yourself is the mesh resource.
Create a mesh called `global`:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/appmesh/global-mesh.yaml
cat << EOF | kubectl apply -f -
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
name: global
spec:
serviceDiscoveryType: dns
EOF
```
Create a test namespace with App Mesh sidecar injection enabled:
```bash
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
name: test
labels:
appmesh.k8s.aws/sidecarInjectorWebhook: enabled
EOF
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/deployment.yaml
kubectl apply -f ${REPO}/artifacts/appmesh/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
@@ -72,20 +83,25 @@ spec:
portName: http
# App Mesh reference
meshName: global
# App Mesh ingress (optional)
hosts:
- "*"
# App Mesh ingress timeout (optional)
timeout: 5s
# App Mesh egress (optional)
backends:
- backend.test
# App Mesh retry policy (optional)
retries:
attempts: 3
perTryTimeout: 1s
perTryTimeout: 5s
retryOn: "gateway-error,client-error,stream-error"
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
@@ -166,37 +182,42 @@ The App Mesh specific settings are:
App Mesh blocks all egress traffic by default. If your application needs to call another service, you have to create an
App Mesh virtual service for it and add the virtual service name to the backend list.
### Setup App Mesh ingress (optional)
### Setup App Mesh Gateway (optional)
In order to expose the podinfo app outside the mesh you'll be using an Envoy ingress and an AWS classic load balancer.
The ingress binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
If podinfo becomes unavailable due to a HPA downscaling or a node restart,
the ingress will retry the calls for a short period of time.
In order to expose the podinfo app outside the mesh you'll be using an Envoy-powered ingress gateway and an AWS network load balancer.
The gateway binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
If podinfo becomes unavailable due to a cluster downscaling or a node restart,
the gateway will retry the calls for a short period of time.
Deploy the ingress and the AWS ELB service:
Deploy the gateway behind an AWS NLB:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/ingress.yaml
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
--namespace test \
--set mesh.name=global
```
Find the ingress public address:
Find the gateway public address:
```bash
kubectl -n test describe svc/ingress | grep Ingress
LoadBalancer Ingress: yyy-xx.us-west-2.elb.amazonaws.com
export URL="http://$(kubectl -n test get svc/appmesh-gateway -ojson | jq -r ".status.loadBalancer.ingress[].hostname")"
echo $URL
```
Wait for the ELB to become active:
Wait for the NLB to become active:
```bash
watch curl -sS ${INGRESS_URL}
watch curl -sS $URL
```
Open your browser and navigate to the ingress address to access the podinfo UI.
### Automated canary promotion
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
```bash
@@ -237,11 +258,6 @@ When the canary analysis starts, Flagger will call the pre-rollout webhooks befo
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables
During the analysis the canary's progress can be monitored with Grafana. The App Mesh dashboard URL is
http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
@@ -264,7 +280,7 @@ If youve enabled the Slack notifications, you should receive the following me
### Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses the rollout.
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
Trigger a canary deployment:
@@ -295,25 +311,20 @@ When the number of failed checks reaches the canary analysis threshold, the traf
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
Status:
Canary Weight: 0
Failed Checks: 5
Phase: Failed
Events:
Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement success rate 69.17% < 99%
Halt podinfo.test advancement success rate 61.39% < 99%
Halt podinfo.test advancement success rate 55.06% < 99%
Halt podinfo.test advancement request duration 1.20s > 0.5s
Halt podinfo.test advancement request duration 1.45s > 0.5s
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
New revision detected! Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement success rate 69.17% < 99%
Halt podinfo.test advancement success rate 61.39% < 99%
Halt podinfo.test advancement success rate 55.06% < 99%
Halt podinfo.test advancement request duration 1.20s > 0.5s
Halt podinfo.test advancement request duration 1.45s > 0.5s
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
@@ -334,7 +345,7 @@ Edit the canary analysis, remove the max/step weight and add the match condition
```yaml
canaryAnalysis:
interval: 1m
threshold: 10
threshold: 5
iterations: 10
match:
- headers:

View File

@@ -0,0 +1,434 @@
# Contour Canary Deployments
This guide shows you how to use [Contour](https://projectcontour.io/) ingress controller and Flagger to automate canary releases and A/B testing.
![Flagger Contour Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-contour-overview.png)
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Contour **v1.0** or newer.
Install Contour on a cluster with LoadBalancer support:
```bash
kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
```
The above command will deploy Contour and an Envoy daemonset in the `projectcontour` namespace.
Install Flagger using Kustomize (kubectl 1.14) in the `projectcontour` namespace:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/contour
```
The above command will deploy Flagger and Prometheus configured to scrape Contour's Envoy instances.
You can also enable Slack or MS Teams notifications,
see the Kustomize install [docs](https://docs.flagger.app/install/flagger-install-on-kubernetes#install-flagger-with-kustomize).
Or you can install Flagger using Helm:
```sh
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace projectcontour \
--set meshProvider=contour \
--set prometheus.install=true \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Contour HTTPProxy).
These objects expose the application in the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Install the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Create a canary custom resource (replace `app.example.com` with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# HPA reference
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# service port
port: 80
# container port
targetPort: 9898
# Contour request timeout
timeout: 15s
# Contour retry policy
retries:
attempts: 3
perTryTimeout: 5s
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 30s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Contour Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99 in milliseconds
threshold: 500
interval: 30s
# testing
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
type: rollout
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -host app.example.com http://envoy.projectcontour"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
The canary analysis will run for five minutes (ten 5% steps up to the 50% maximum, at a 30s interval) while validating the HTTP metrics and rollout hooks every half a minute.
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
httpproxy.projectcontour.io/podinfo
```
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
### Expose the app outside the cluster
Find the external address of Contour's Envoy load balancer:
```bash
export ADDRESS="$(kubectl -n projectcontour get svc/envoy -ojson \
| jq -r ".status.loadBalancer.ingress[].hostname")"
echo $ADDRESS
```
Configure your DNS server with a CNAME record (AWS) or A record (GKE/AKS/DOKS) and point a domain e.g. `app.example.com` to the LB address.
Create an HTTPProxy definition and include the podinfo proxy generated by Flagger (replace `app.example.com` with your own domain):
```yaml
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
name: podinfo-ingress
namespace: test
spec:
virtualhost:
fqdn: app.example.com
includes:
- name: podinfo
namespace: test
conditions:
- prefix: /
```
Save the above resource as podinfo-ingress.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-ingress.yaml
```
Verify that Contour processed the proxy definition with:
```sh
kubectl -n test get httpproxies
NAME              FQDN              STATUS
podinfo                             valid
podinfo-ingress   app.example.com   valid
```
Now you can access the podinfo UI using your domain address.
Note that you should be using HTTPS when exposing production workloads on the internet.
You can obtain free TLS certs from Let's Encrypt, read this [guide](https://github.com/stefanprodan/eks-contour-ingress)
on how to configure cert-manager to secure Contour with TLS certificates.
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
New revision detected! Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2019-12-20T14:05:07Z
```
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
### Automated rollback
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
Trigger a canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.2
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it deploy/flagger-loadtester bash
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 http://app.example.com/status/500
```
Generate latency:
```bash
watch -n 1 curl http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n projectcontour logs deploy/flagger -f | jq .msg
New revision detected! Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement success rate 69.17% < 99%
Halt podinfo.test advancement success rate 61.39% < 99%
Halt podinfo.test advancement success rate 55.06% < 99%
Halt podinfo.test advancement request duration 1.20s > 500ms
Halt podinfo.test advancement request duration 1.45s > 500ms
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
```yaml
canaryAnalysis:
interval: 1m
threshold: 5
iterations: 10
match:
- headers:
x-canary:
exact: "insider"
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 1m -q 5 -c 5 -H 'X-Canary: insider' -host app.example.com http://envoy.projectcontour"
```
The above configuration will run an analysis for ten minutes targeting users that have an `X-Canary: insider` header.
You can also use an HTTP cookie. To target all users with a cookie set to `insider`, the match condition should be:
```yaml
match:
- headers:
cookie:
suffix: "insider"
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 1m -q 5 -c 5 -H 'Cookie: canary=insider' -host app.example.com http://envoy.projectcontour"
```
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.3
```
Flagger detects that the deployment revision changed and starts the A/B test:
```text
kubectl -n projectcontour logs deploy/flagger -f | jq .msg
New revision detected! Starting canary analysis for podinfo.test
Advance podinfo.test canary iteration 1/10
Advance podinfo.test canary iteration 2/10
Advance podinfo.test canary iteration 3/10
Advance podinfo.test canary iteration 4/10
Advance podinfo.test canary iteration 5/10
Advance podinfo.test canary iteration 6/10
Advance podinfo.test canary iteration 7/10
Advance podinfo.test canary iteration 8/10
Advance podinfo.test canary iteration 9/10
Advance podinfo.test canary iteration 10/10
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
The web browser user agent header allows user segmentation based on device or OS.
For example, if you want to route all mobile users to the canary instance:
```yaml
match:
- headers:
user-agent:
prefix: "Mobile"
```
Or if you want to target only Android users:
```yaml
match:
- headers:
user-agent:
prefix: "Android"
```
Or a specific browser version:
```yaml
match:
- headers:
user-agent:
suffix: "Firefox/71.0"
```

View File

@@ -0,0 +1,319 @@
# Envoy/Crossover Canary Deployments
This guide shows you how to use Envoy, [Crossover](https://github.com/mumoshu/crossover) and Flagger to automate canary deployments.
Crossover is a minimal Envoy xDS implementation that supports the [Service Mesh Interface](https://smi-spec.io/).
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Envoy paired with the [Crossover](https://github.com/mumoshu/crossover) sidecar.
Create a test namespace:
```bash
kubectl create ns test
```
Install Envoy along with the Crossover sidecar with Helm:
```bash
helm repo add crossover https://mumoshu.github.io/crossover
helm upgrade --install envoy crossover/envoy \
--namespace test \
-f <(cat <<EOF
smi:
apiVersions:
trafficSplits: v1alpha1
upstreams:
podinfo:
smi:
enabled: true
backends:
podinfo-primary:
port: 9898
weight: 100
podinfo-canary:
port: 9898
weight: 0
EOF
)
```
Install Flagger and the Prometheus add-on in the same namespace as Envoy:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace test \
--set prometheus.install=true \
--set meshProvider=smi:crossover
```
Optionally you can enable Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace test \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, SMI traffic splits).
These objects expose the application on the mesh and drive the canary analysis and promotion.
There is no SMI object that you need to create yourself.
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create a canary custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# specify mesh provider if it isn't the default one
# provider: "smi:crossover"
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# ClusterIP port number
port: 9898
# container port number or name (optional)
targetPort: 9898
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Envoy Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# testing (optional)
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Host: podinfo.test' http://envoy.test:10000/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.5
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
New revision detected! Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana.
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=test \
--set url=http://flagger-prometheus:9090
```
Run:
```bash
kubectl port-forward --namespace test svc/flagger-grafana 3000:80
```
The Envoy dashboard URL is
http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-target=podinfo
![Envoy Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png)
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE   NAME       STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo    Progressing   15       2019-10-02T14:05:07Z
prod        frontend   Succeeded     0        2019-10-02T16:15:07Z
prod        backend    Failed        0        2019-10-02T17:05:07Z
```
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
### Automated rollback
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
Trigger a canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.2
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it deploy/flagger-loadtester bash
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 -H 'Host: podinfo.test' http://envoy.test:10000/status/500
```
Generate latency:
```bash
watch -n 1 curl -H 'Host: podinfo.test' http://envoy.test:10000/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test logs deploy/flagger -f | jq .msg
New revision detected! Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement success rate 69.17% < 99%
Halt podinfo.test advancement success rate 61.39% < 99%
Halt podinfo.test advancement success rate 55.06% < 99%
Halt podinfo.test advancement request duration 1.20s > 0.5s
Halt podinfo.test advancement request duration 1.45s > 0.5s
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)

BIN
docs/logo/weaveworks.png Normal file

Binary file not shown.

After

Size: 3.0 KiB

47
go.mod
View File

@@ -3,54 +3,23 @@ module github.com/weaveworks/flagger
go 1.13
require (
cloud.google.com/go v0.37.4 // indirect
github.com/Masterminds/semver v1.4.2
github.com/beorn7/perks v1.0.0 // indirect
github.com/gogo/protobuf v1.2.1 // indirect
github.com/Masterminds/semver/v3 v3.0.3
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
github.com/golang/protobuf v1.3.1 // indirect
github.com/google/go-cmp v0.3.0
github.com/googleapis/gnostic v0.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/imdario/mergo v0.3.7 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/mitchellh/hashstructure v1.0.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
github.com/prometheus/common v0.3.0 // indirect
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 // indirect
github.com/prometheus/client_golang v1.0.0
go.uber.org/atomic v1.3.2 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.9.1
golang.org/x/crypto v0.0.0-20190418161225-b43e412143f9 // indirect
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c // indirect
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c // indirect
golang.org/x/text v0.3.2 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
go.uber.org/zap v1.10.0
gopkg.in/h2non/gock.v1 v1.0.14
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.0.0-20190620073856-dcce3486da33
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
k8s.io/client-go v11.0.0+incompatible
k8s.io/code-generator v0.0.0-20190620073620-d55040311883
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 // indirect
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect
)
replace (
github.com/google/uuid => github.com/google/uuid v1.0.0
golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774
golang.org/x/net => golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503
golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9
k8s.io/api => k8s.io/api v0.0.0-20190620073856-dcce3486da33
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
k8s.io/client-go => k8s.io/client-go v0.0.0-20190620074045-585a16d2e773
k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190620073620-d55040311883
k8s.io/component-base => k8s.io/component-base v0.0.0-20190620074451-e5083e713460
k8s.io/api v0.17.1
k8s.io/apimachinery v0.17.1
k8s.io/client-go v0.17.1
k8s.io/code-generator v0.17.1
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
)
replace k8s.io/klog => github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423

209
go.sum
View File

@@ -1,20 +1,25 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.0.3 h1:znjIyLfpXEDQjOIEWh+ehwpTU14UzUPub3c3sm36u14=
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -23,13 +28,11 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@@ -38,15 +41,20 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -58,9 +66,10 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -69,17 +78,14 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -93,25 +99,33 @@ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -121,18 +135,13 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -140,42 +149,46 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4=
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 h1:Raos9GP+3BlCBicScEQ+SjTLpYYac34fZMoeqj9McSM=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423 h1:qTtUiiNM+iq4IXOwHofKW5+jzvkvnNVz0GFRxwukUlY=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423/go.mod h1:TYstY5LQfzxFVm9MiiMg7kZ39sc5cue/6CFoY5KgXn8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
@@ -185,35 +198,70 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFhxXGKWHMIRUI/T5x1GP90=
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -221,9 +269,7 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -233,7 +279,6 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -241,31 +286,31 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20190620073856-dcce3486da33 h1:aC/EvF9PT1h8NeMEOVwTel8xxbZwq0SZnxXNThEROnE=
k8s.io/api v0.0.0-20190620073856-dcce3486da33/go.mod h1:ldk709UQo/iedNLOW7J06V9QSSGY5heETKeWqnPoqF8=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33 h1:Lkd+QNFOB3DqrDyWo796aodJgFJautn/M+t9IGearPc=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33/go.mod h1:9q5NW/mMno/nwbRZd/Ks2TECgi2PTZ9cwarf4q+ze6Q=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773 h1:XyjDnwRO9icfyrN7HRSa8o3NqdPOEQoVW8vWizuqyQQ=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773/go.mod h1:miKCC7C/WGwJqcDctyJtAnP3Gss0Y5KwURqJ7q5pfEw=
k8s.io/code-generator v0.0.0-20190620073620-d55040311883 h1:NWWNvN6IdpmQvZ43rVccCI8GPUrheK8XNdqeKycw0DI=
k8s.io/code-generator v0.0.0-20190620073620-d55040311883/go.mod h1:+a+9g9W0llgbgvx6qOb+VbeZPH5km1FrVyMQe9/jkQY=
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/api v0.17.1 h1:i46MidoDOE9tvQ0TTEYggf3ka/pziP1+tHI/GFVeJao=
k8s.io/api v0.17.1/go.mod h1:zxiAc5y8Ngn4fmhWUtSxuUlkfz1ixT7j9wESokELzOg=
k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM=
k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/client-go v0.17.1 h1:LbbuZ5tI7OYx4et5DfRFcJuoojvpYO0c7vps2rgJsHY=
k8s.io/client-go v0.17.1/go.mod h1:HZtHJSC/VuSHcETN9QA5QDZky1tXiYrkF/7t7vRpO1A=
k8s.io/code-generator v0.17.1 h1:e3B1UqRzRUWygp7WD+QTRT3ZUahPIaRKF0OFa7duQwI=
k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 h1:hlzz2EvLPcefAcG/j0tOZpds4LWSElZzxpZuhxbblbc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666/go.mod h1:jqYp7BKXW0Jl+F1dWXBieUmcHKMPpGHGWA0uqfpOZZ4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/structured-merge-diff v0.0.0-20181214233322-d43a45b8663b/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=


@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh
${CODEGEN_PKG}/generate-groups.sh all \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"flagger:v1alpha3 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 gloo:v1" \
"flagger:v1alpha3 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 gloo:v1 projectcontour:v1" \
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt


@@ -25,7 +25,7 @@ Install Flagger for AWS App Mesh:
kubectl apply -k github.com/weaveworks/flagger//kustomize/appmesh
```
This deploys Flagger and Prometheus (configured to scrape the App Mesh Envoy sidecars) in the `appmesh-system` namespace.
This deploys Flagger in the `appmesh-system` namespace and sets the metrics server URL to the App Mesh Prometheus instance.
Install Flagger for Linkerd:
@@ -41,6 +41,14 @@ If you want to install a specific Flagger release, add the version number to the
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
```
Install Flagger for Contour:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/contour
```
This deploys Flagger and Prometheus in the `projectcontour` namespace and configures Prometheus to scrape Contour's Envoy instances.
## Generic installer
Install Flagger and Prometheus:


@@ -1,6 +1,5 @@
namespace: appmesh-system
bases:
- ../base/flagger
- ../base/prometheus
patchesStrategicMerge:
- patch.yaml


@@ -10,7 +10,20 @@ spec:
args:
- -log-level=info
- -mesh-provider=appmesh
- -metrics-server=http://flagger-prometheus:9090
- -metrics-server=http://appmesh-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: flagger
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flagger
subjects:
- kind: ServiceAccount
name: flagger
namespace: appmesh-system


@@ -115,6 +115,9 @@ spec:
type: object
required: ["port"]
properties:
name:
description: Kubernetes service name
type: string
port:
description: Container port number
type: number
@@ -245,6 +248,8 @@ spec:
- rollout
- confirm-promotion
- post-rollout
- event
- rollback
url:
description: URL address of this webhook
type: string


@@ -20,7 +20,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.16.0
image: weaveworks/flagger:0.21.0
imagePullPolicy: IfNotPresent
ports:
- name: http


@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 0.20.2
newTag: 0.23.0


@@ -71,6 +71,11 @@ rules:
- virtualservices
- gateways
verbs: ["*"]
- apiGroups:
- projectcontour.io
resources:
- httpproxies
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:


@@ -19,7 +19,7 @@ spec:
serviceAccountName: flagger-prometheus
containers:
- name: prometheus
image: prom/prometheus:v2.12.0
image: prom/prometheus:v2.15.2
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=2h'


@@ -12,4 +12,4 @@ configMapGenerator:
- prometheus.yml
images:
- name: prom/prometheus
newTag: v2.10.0
newTag: v2.15.2


@@ -0,0 +1,8 @@
namespace: projectcontour
resources:
- namespace.yaml
bases:
- ../base/flagger/
- ../base/prometheus/
patchesStrategicMerge:
- patch.yaml


@@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
name: projectcontour


@@ -0,0 +1,16 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger
spec:
template:
spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=contour
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=


@@ -14,3 +14,17 @@ spec:
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: flagger
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flagger
subjects:
- kind: ServiceAccount
name: flagger
namespace: istio-system


@@ -14,3 +14,16 @@ spec:
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: flagger
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flagger
subjects:
- kind: ServiceAccount
name: flagger
namespace: linkerd


@@ -46,6 +46,9 @@ spec:
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: "#34577c"
livenessProbe:
exec:
command:


@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.11.0
image: weaveworks/flagger-loadtester:0.12.1
imagePullPolicy: IfNotPresent
ports:
- name: http


@@ -17,6 +17,7 @@ limitations under the License.
package v1alpha3
import (
"fmt"
"time"
hpav1 "k8s.io/api/autoscaling/v1"
@@ -69,7 +70,7 @@ type CanarySpec struct {
// virtual service spec
Service CanaryService `json:"service"`
// metrics and thresholds
// metrics, thresholds and webhooks spec
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
// the maximum time in seconds for a canary deployment to make progress
@@ -92,8 +93,9 @@ type CanaryList struct {
}
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
// and service mesh or ingress routing objects
type CanaryService struct {
Name string `json:"name,omitempty"`
Port int32 `json:"port"`
PortName string `json:"portName,omitempty"`
TargetPort intstr.IntOrString `json:"targetPort,omitempty"`
@@ -126,7 +128,7 @@ type CanaryAnalysis struct {
Iterations int `json:"iterations,omitempty"`
}
// CanaryMetric holds the reference to Istio metrics used for canary analysis
// CanaryMetric holds the reference to metrics used for canary analysis
type CanaryMetric struct {
Name string `json:"name"`
Interval string `json:"interval,omitempty"`
@@ -149,6 +151,10 @@ const (
ConfirmRolloutHook HookType = "confirm-rollout"
// ConfirmPromotionHook halt canary promotion until webhook returns HTTP 200
ConfirmPromotionHook HookType = "confirm-promotion"
// EventHook dispatches Flagger events to the specified endpoint
EventHook HookType = "event"
// RollbackHook rolls back the canary analysis if the webhook returns HTTP 200
RollbackHook HookType = "rollback"
)
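For context, a hedged sketch of how the two new hook types could be declared; the field names (`Name`, `Type`, `URL`) are assumed to mirror the CRD webhook schema shown earlier in this changeset:

```go
// Hypothetical webhook entries exercising the new hook types.
// Field names assume CanaryWebhook mirrors the CRD webhook schema.
var exampleHooks = []CanaryWebhook{
	// Receives a JSON payload for every Flagger event (no gating).
	{Name: "send-events", Type: EventHook, URL: "http://event-recorder.test/webhook"},
	// Polled during analysis; an HTTP 200 response triggers a rollback.
	{Name: "gate-rollback", Type: RollbackHook, URL: "http://flagger-loadtester.test/gate/check"},
}
```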
// CanaryWebhook holds the reference to external checks used for canary analysis
@@ -169,6 +175,17 @@ type CanaryWebhookPayload struct {
Metadata map[string]string `json:"metadata,omitempty"`
}
// GetServiceNames returns the apex, primary and canary Kubernetes service names
func (c *Canary) GetServiceNames() (apexName, primaryName, canaryName string) {
apexName = c.Spec.TargetRef.Name
if c.Spec.Service.Name != "" {
apexName = c.Spec.Service.Name
}
primaryName = fmt.Sprintf("%s-primary", apexName)
canaryName = fmt.Sprintf("%s-canary", apexName)
return
}
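The override behavior follows directly from the function body above; a quick illustration with illustrative names:

```go
// Illustrative only: with spec.service.name set, the override becomes
// the apex name and the -primary/-canary suffixes are derived from it.
func exampleServiceNames() {
	c := &Canary{}
	c.Spec.TargetRef.Name = "podinfo"
	c.Spec.Service.Name = "podinfo-svc"
	apex, primary, canary := c.GetServiceNames()
	// apex == "podinfo-svc", primary == "podinfo-svc-primary",
	// canary == "podinfo-svc-canary"
	fmt.Println(apex, primary, canary)
}
```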
// GetProgressDeadlineSeconds returns the progress deadline (default 600s)
func (c *Canary) GetProgressDeadlineSeconds() int {
if c.Spec.ProgressDeadlineSeconds != nil {

View File

@@ -0,0 +1,5 @@
package projectcontour
const (
GroupName = "projectcontour.io"
)


@@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package
// Package v1 is the v1 version of the API.
// +groupName=projectcontour.io
package v1


@@ -0,0 +1,376 @@
// Copyright © 2019 VMware
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// HTTPProxySpec defines the spec of the CRD.
type HTTPProxySpec struct {
// Virtualhost appears at most once. If it is present, the object is considered
// to be a "root".
// +optional
VirtualHost *VirtualHost `json:"virtualhost,omitempty"`
// Routes are the ingress routes. If TCPProxy is present, Routes is ignored.
// +optional
Routes []Route `json:"routes,omitempty"`
// TCPProxy holds TCP proxy information.
// +optional
TCPProxy *TCPProxy `json:"tcpproxy,omitempty"`
// Includes allow for specific routing configuration to be appended to another HTTPProxy in another namespace.
// +optional
Includes []Include `json:"includes,omitempty"`
}
// Include describes a set of policies that can be applied to an HTTPProxy in a namespace.
type Include struct {
// Name of the HTTPProxy
Name string `json:"name"`
// Namespace of the HTTPProxy to include. Defaults to the current namespace if not supplied.
// +optional
Namespace string `json:"namespace,omitempty"`
// Conditions are a set of routing properties that is applied to an HTTPProxy in a namespace.
// +optional
Conditions []Condition `json:"conditions,omitempty"`
}
// Condition is a policy that is applied on top of HTTPProxies.
// One of Prefix or Header must be provided.
type Condition struct {
// Prefix defines a prefix match for a request.
// +optional
Prefix string `json:"prefix,omitempty"`
// Header specifies the header condition to match.
// +optional
Header *HeaderCondition `json:"header,omitempty"`
}
// HeaderCondition specifies the header condition to match.
// Name is required. Only one of Present or Contains must
// be provided.
type HeaderCondition struct {
// Name is the name of the header to match on. Name is required.
// Header names are case insensitive.
Name string `json:"name"`
// Present is true if the Header is present in the request.
// +optional
Present bool `json:"present,omitempty"`
// Contains is true if the Header containing this string is present
// in the request.
// +optional
Contains string `json:"contains,omitempty"`
// NotContains is true if the Header containing this string is not present
// in the request.
// +optional
NotContains string `json:"notcontains,omitempty"`
// Exact is true if the Header containing this string matches exactly
// in the request.
// +optional
Exact string `json:"exact,omitempty"`
// NotExact is true if the Header containing this string doesn't match exactly
// in the request.
// +optional
NotExact string `json:"notexact,omitempty"`
}
// VirtualHost appears at most once. If it is present, the object is considered
// to be a "root".
type VirtualHost struct {
// The fully qualified domain name of the root of the ingress tree;
// all leaves of the DAG rooted at this object relate to the fqdn.
Fqdn string `json:"fqdn"`
// If present, describes TLS properties. The SNI names that will be matched on
// are described in fqdn; the tls.secretName secret must contain a
// matching certificate
// +optional
TLS *TLS `json:"tls,omitempty"`
}
// TLS describes TLS properties. The SNI names that will be matched on
// are described in fqdn; the tls.secretName secret must contain a
// matching certificate unless tls.passthrough is set to true.
type TLS struct {
// required, the name of a secret in the current namespace
SecretName string `json:"secretName,omitempty"`
// Minimum TLS version this vhost should negotiate
// +optional
MinimumProtocolVersion string `json:"minimumProtocolVersion,omitempty"`
// If Passthrough is set to true, the SecretName will be ignored
// and the encrypted handshake will be passed through to the
// backing cluster.
// +optional
Passthrough bool `json:"passthrough,omitempty"`
}
// Route contains the set of routes for a virtual host.
type Route struct {
// Conditions are a set of routing properties that is applied to an HTTPProxy in a namespace.
// +optional
Conditions []Condition `json:"conditions,omitempty"`
// Services are the services to proxy traffic.
Services []Service `json:"services,omitempty"`
// Enables websocket support for the route.
// +optional
EnableWebsockets bool `json:"enableWebsockets,omitempty"`
// Allow this path to respond to insecure requests over HTTP which are normally
// not permitted when a `virtualhost.tls` block is present.
// +optional
PermitInsecure bool `json:"permitInsecure,omitempty"`
// The timeout policy for this route.
// +optional
TimeoutPolicy *TimeoutPolicy `json:"timeoutPolicy,omitempty"`
// The retry policy for this route.
// +optional
RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
// The health check policy for this route.
// +optional
HealthCheckPolicy *HTTPHealthCheckPolicy `json:"healthCheckPolicy,omitempty"`
// The load balancing policy for this route.
// +optional
LoadBalancerPolicy *LoadBalancerPolicy `json:"loadBalancerPolicy,omitempty"`
// The policy for rewriting the path of the request URL
// after the request has been routed to a Service.
//
// +optional
PathRewritePolicy *PathRewritePolicy `json:"pathRewritePolicy,omitempty"`
// The policy for managing request headers during proxying
// +optional
RequestHeadersPolicy *HeadersPolicy `json:"requestHeadersPolicy,omitempty"`
// The policy for managing response headers during proxying
// +optional
ResponseHeadersPolicy *HeadersPolicy `json:"responseHeadersPolicy,omitempty"`
}
func (r *Route) GetPrefixReplacements() []ReplacePrefix {
if r.PathRewritePolicy != nil {
return r.PathRewritePolicy.ReplacePrefix
}
return nil
}
// TCPProxy contains the set of services to proxy TCP connections.
type TCPProxy struct {
// The load balancing policy for the backend services.
// +optional
LoadBalancerPolicy *LoadBalancerPolicy `json:"loadBalancerPolicy,omitempty"`
// Services are the services to proxy traffic
Services []Service `json:"services,omitempty"`
// Include specifies that this tcpproxy should be delegated to another HTTPProxy.
// +optional
Include *TCPProxyInclude `json:"includes,omitempty"`
}
// TCPProxyInclude describes a target HTTPProxy document which contains the TCPProxy details.
type TCPProxyInclude struct {
// Name of the child HTTPProxy
Name string `json:"name"`
// Namespace of the HTTPProxy to include. Defaults to the current namespace if not supplied.
// +optional
Namespace string `json:"namespace,omitempty"`
}
// Service defines a Kubernetes Service to proxy traffic to.
type Service struct {
// Name is the name of the Kubernetes service to proxy traffic to.
// Names defined here will be used to look up corresponding endpoints which contain the IPs to route.
Name string `json:"name"`
// Port (defined as an integer) to proxy traffic to, since a service can have multiple ports defined.
Port int `json:"port"`
// Protocol may be used to specify (or override) the protocol used to reach this Service.
// Values may be tls, h2, h2c. If omitted, protocol selection falls back on Service annotations.
// +optional
Protocol *string `json:"protocol,omitempty"`
// Weight defines the percentage of traffic to route to this service
// +optional
Weight uint32 `json:"weight,omitempty"`
// UpstreamValidation defines how to verify the backend service's certificate
// +optional
UpstreamValidation *UpstreamValidation `json:"validation,omitempty"`
// If Mirror is true, the Service will receive a read-only mirror of the traffic for this route.
Mirror bool `json:"mirror,omitempty"`
// The policy for managing request headers during proxying
// +optional
RequestHeadersPolicy *HeadersPolicy `json:"requestHeadersPolicy,omitempty"`
// The policy for managing response headers during proxying
// +optional
ResponseHeadersPolicy *HeadersPolicy `json:"responseHeadersPolicy,omitempty"`
}
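Route and Service are the two types a traffic-shifting controller such as Flagger works with; a minimal sketch of a weighted split, with illustrative names, port, and weights:

```go
// A sketch of a 90/10 primary/canary split using the types above;
// the service names, port, and weights are illustrative.
var canarySplit = Route{
	Conditions: []Condition{{Prefix: "/"}},
	Services: []Service{
		{Name: "podinfo-primary", Port: 9898, Weight: 90},
		{Name: "podinfo-canary", Port: 9898, Weight: 10},
	},
}
```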
// HTTPHealthCheckPolicy defines health checks on the upstream service.
type HTTPHealthCheckPolicy struct {
// HTTP endpoint used to perform health checks on upstream service
Path string `json:"path"`
// The value of the host header in the HTTP health check request.
// If left empty (default value), the name "contour-envoy-healthcheck"
// will be used.
Host string `json:"host,omitempty"`
// The interval (seconds) between health checks
// +optional
IntervalSeconds int64 `json:"intervalSeconds"`
// The time to wait (seconds) for a health check response
// +optional
TimeoutSeconds int64 `json:"timeoutSeconds"`
// The number of unhealthy health checks required before a host is marked unhealthy
// +optional
UnhealthyThresholdCount uint32 `json:"unhealthyThresholdCount"`
// The number of healthy health checks required before a host is marked healthy
// +optional
HealthyThresholdCount uint32 `json:"healthyThresholdCount"`
}
// TimeoutPolicy defines the attributes associated with timeout.
type TimeoutPolicy struct {
// TimeoutPolicy durations are expressed as per the format specified in the ParseDuration documentation: https://godoc.org/time#ParseDuration
// Example input values: "300ms", "5s", "1m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
// The string 'infinity' is also a valid input and specifies no timeout.
// Timeout for receiving a response from the server after processing a request from client.
// If not supplied the timeout duration is undefined.
// +optional
Response string `json:"response,omitempty"`
// Timeout after which if there are no active requests for this route, the connection between
// Envoy and the backend will be closed. If not specified, there is no per-route idle timeout.
// +optional
Idle string `json:"idle,omitempty"`
}
// RetryPolicy defines the attributes associated with retrying policy.
type RetryPolicy struct {
// NumRetries is the maximum allowed number of retries.
// If not supplied, the number of retries is one.
// +optional
NumRetries uint32 `json:"count"`
// PerTryTimeout specifies the timeout per retry attempt.
// Ignored if NumRetries is not supplied.
PerTryTimeout string `json:"perTryTimeout,omitempty"`
}
// ReplacePrefix describes a path prefix replacement.
type ReplacePrefix struct {
// Prefix specifies the URL path prefix to be replaced.
//
// If Prefix is specified, it must exactly match the Condition
// prefix that is rendered by the chain of including HTTPProxies
// and only that path prefix will be replaced by Replacement.
// This allows HTTPProxies that are included through multiple
// roots to only replace specific path prefixes, leaving others
// unmodified.
//
// If Prefix is not specified, all routing prefixes rendered
// by the include chain will be replaced.
//
// +optional
// +kubebuilder:validation:MinLength=1
Prefix string `json:"prefix,omitempty"`
// Replacement is the string that the routing path prefix
// will be replaced with. This must not be empty.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
Replacement string `json:"replacement"`
}
// PathRewritePolicy specifies how a request URL path should be
// rewritten. This rewriting takes place after a request is routed
// and has no subsequent effects on the proxy's routing decision.
// No HTTP headers or body content is rewritten.
//
// Exactly one field in this struct may be specified.
type PathRewritePolicy struct {
// ReplacePrefix describes how the path prefix should be replaced.
// +optional
ReplacePrefix []ReplacePrefix `json:"replacePrefix,omitempty"`
}
// LoadBalancerPolicy defines the load balancing policy.
type LoadBalancerPolicy struct {
Strategy string `json:"strategy,omitempty"`
}
// HeadersPolicy defines how headers are managed during forwarding
type HeadersPolicy struct {
// Set specifies a list of HTTP header values that will be set in the HTTP header
// +optional
Set []HeaderValue `json:"set,omitempty"`
// Remove specifies a list of HTTP header names to remove
// +optional
Remove []string `json:"remove,omitempty"`
}
// HeaderValue represents a header name/value pair
type HeaderValue struct {
// Name represents a key of a header
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
Name string `json:"name"`
// Value represents the value of a header specified by a key
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
Value string `json:"value"`
}
// UpstreamValidation defines how to verify the backend service's certificate
type UpstreamValidation struct {
// Name of the Kubernetes secret to be used to validate the certificate presented by the backend
CACertificate string `json:"caSecret"`
// Key which is expected to be present in the 'subjectAltName' of the presented certificate
SubjectName string `json:"subjectName"`
}
// Status reports the current state of the HTTPProxy.
type Status struct {
// +optional
CurrentStatus string `json:"currentStatus,omitempty"`
// +optional
Description string `json:"description,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HTTPProxy is an Ingress CRD specification
// +k8s:openapi-gen=true
// +kubebuilder:printcolumn:name="FQDN",type="string",JSONPath=".spec.virtualhost.fqdn",description="Fully qualified domain name"
// +kubebuilder:printcolumn:name="TLS Secret",type="string",JSONPath=".spec.virtualhost.tls.secretName",description="Secret with TLS credentials"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.currentStatus",description="The current status of the HTTPProxy"
// +kubebuilder:printcolumn:name="Status Description",type="string",JSONPath=".status.description",description="Description of the current status"
// +kubebuilder:resource:scope=Namespaced,path=httpproxies,shortName=proxy;proxies,singular=httpproxy
type HTTPProxy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec HTTPProxySpec `json:"spec"`
// +optional
Status Status `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HTTPProxyList is a list of HTTPProxies.
type HTTPProxyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []HTTPProxy `json:"items"`
}


@@ -0,0 +1,37 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/weaveworks/flagger/pkg/apis/projectcontour"
)
// SchemeGroupVersion is the GroupVersion for the Contour API
var SchemeGroupVersion = schema.GroupVersion{Group: projectcontour.GroupName, Version: "v1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource gets a Contour GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&HTTPProxy{},
&HTTPProxyList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
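A minimal sketch of how a consumer would use this registration, e.g. before building a typed client or an informer for HTTPProxy (the helper name is hypothetical):

```go
// newContourScheme is a hypothetical helper: it registers the Contour
// types into a fresh scheme so typed clients can encode/decode them.
func newContourScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```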


@@ -0,0 +1,548 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
if in.Header != nil {
in, out := &in.Header, &out.Header
*out = new(HeaderCondition)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
if in == nil {
return nil
}
out := new(Condition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPHealthCheckPolicy) DeepCopyInto(out *HTTPHealthCheckPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHealthCheckPolicy.
func (in *HTTPHealthCheckPolicy) DeepCopy() *HTTPHealthCheckPolicy {
if in == nil {
return nil
}
out := new(HTTPHealthCheckPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPProxy) DeepCopyInto(out *HTTPProxy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxy.
func (in *HTTPProxy) DeepCopy() *HTTPProxy {
if in == nil {
return nil
}
out := new(HTTPProxy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPProxy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPProxyList) DeepCopyInto(out *HTTPProxyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HTTPProxy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxyList.
func (in *HTTPProxyList) DeepCopy() *HTTPProxyList {
if in == nil {
return nil
}
out := new(HTTPProxyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HTTPProxyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPProxySpec) DeepCopyInto(out *HTTPProxySpec) {
*out = *in
if in.VirtualHost != nil {
in, out := &in.VirtualHost, &out.VirtualHost
*out = new(VirtualHost)
(*in).DeepCopyInto(*out)
}
if in.Routes != nil {
in, out := &in.Routes, &out.Routes
*out = make([]Route, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TCPProxy != nil {
in, out := &in.TCPProxy, &out.TCPProxy
*out = new(TCPProxy)
(*in).DeepCopyInto(*out)
}
if in.Includes != nil {
in, out := &in.Includes, &out.Includes
*out = make([]Include, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPProxySpec.
func (in *HTTPProxySpec) DeepCopy() *HTTPProxySpec {
if in == nil {
return nil
}
out := new(HTTPProxySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderCondition) DeepCopyInto(out *HeaderCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderCondition.
func (in *HeaderCondition) DeepCopy() *HeaderCondition {
if in == nil {
return nil
}
out := new(HeaderCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderValue) DeepCopyInto(out *HeaderValue) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderValue.
func (in *HeaderValue) DeepCopy() *HeaderValue {
if in == nil {
return nil
}
out := new(HeaderValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeadersPolicy) DeepCopyInto(out *HeadersPolicy) {
*out = *in
if in.Set != nil {
in, out := &in.Set, &out.Set
*out = make([]HeaderValue, len(*in))
copy(*out, *in)
}
if in.Remove != nil {
in, out := &in.Remove, &out.Remove
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeadersPolicy.
func (in *HeadersPolicy) DeepCopy() *HeadersPolicy {
if in == nil {
return nil
}
out := new(HeadersPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Include) DeepCopyInto(out *Include) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Include.
func (in *Include) DeepCopy() *Include {
if in == nil {
return nil
}
out := new(Include)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerPolicy) DeepCopyInto(out *LoadBalancerPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerPolicy.
func (in *LoadBalancerPolicy) DeepCopy() *LoadBalancerPolicy {
if in == nil {
return nil
}
out := new(LoadBalancerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PathRewritePolicy) DeepCopyInto(out *PathRewritePolicy) {
*out = *in
if in.ReplacePrefix != nil {
in, out := &in.ReplacePrefix, &out.ReplacePrefix
*out = make([]ReplacePrefix, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PathRewritePolicy.
func (in *PathRewritePolicy) DeepCopy() *PathRewritePolicy {
if in == nil {
return nil
}
out := new(PathRewritePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplacePrefix) DeepCopyInto(out *ReplacePrefix) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplacePrefix.
func (in *ReplacePrefix) DeepCopy() *ReplacePrefix {
if in == nil {
return nil
}
out := new(ReplacePrefix)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RetryPolicy) DeepCopyInto(out *RetryPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryPolicy.
func (in *RetryPolicy) DeepCopy() *RetryPolicy {
if in == nil {
return nil
}
out := new(RetryPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Route) DeepCopyInto(out *Route) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Services != nil {
in, out := &in.Services, &out.Services
*out = make([]Service, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TimeoutPolicy != nil {
in, out := &in.TimeoutPolicy, &out.TimeoutPolicy
*out = new(TimeoutPolicy)
**out = **in
}
if in.RetryPolicy != nil {
in, out := &in.RetryPolicy, &out.RetryPolicy
*out = new(RetryPolicy)
**out = **in
}
if in.HealthCheckPolicy != nil {
in, out := &in.HealthCheckPolicy, &out.HealthCheckPolicy
*out = new(HTTPHealthCheckPolicy)
**out = **in
}
if in.LoadBalancerPolicy != nil {
in, out := &in.LoadBalancerPolicy, &out.LoadBalancerPolicy
*out = new(LoadBalancerPolicy)
**out = **in
}
if in.PathRewritePolicy != nil {
in, out := &in.PathRewritePolicy, &out.PathRewritePolicy
*out = new(PathRewritePolicy)
(*in).DeepCopyInto(*out)
}
if in.RequestHeadersPolicy != nil {
in, out := &in.RequestHeadersPolicy, &out.RequestHeadersPolicy
*out = new(HeadersPolicy)
(*in).DeepCopyInto(*out)
}
if in.ResponseHeadersPolicy != nil {
in, out := &in.ResponseHeadersPolicy, &out.ResponseHeadersPolicy
*out = new(HeadersPolicy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route.
func (in *Route) DeepCopy() *Route {
if in == nil {
return nil
}
out := new(Route)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
*out = *in
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(string)
**out = **in
}
if in.UpstreamValidation != nil {
in, out := &in.UpstreamValidation, &out.UpstreamValidation
*out = new(UpstreamValidation)
**out = **in
}
if in.RequestHeadersPolicy != nil {
in, out := &in.RequestHeadersPolicy, &out.RequestHeadersPolicy
*out = new(HeadersPolicy)
(*in).DeepCopyInto(*out)
}
if in.ResponseHeadersPolicy != nil {
in, out := &in.ResponseHeadersPolicy, &out.ResponseHeadersPolicy
*out = new(HeadersPolicy)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
if in == nil {
return nil
}
out := new(Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPProxy) DeepCopyInto(out *TCPProxy) {
*out = *in
if in.LoadBalancerPolicy != nil {
in, out := &in.LoadBalancerPolicy, &out.LoadBalancerPolicy
*out = new(LoadBalancerPolicy)
**out = **in
}
if in.Services != nil {
in, out := &in.Services, &out.Services
*out = make([]Service, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = new(TCPProxyInclude)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPProxy.
func (in *TCPProxy) DeepCopy() *TCPProxy {
if in == nil {
return nil
}
out := new(TCPProxy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPProxyInclude) DeepCopyInto(out *TCPProxyInclude) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPProxyInclude.
func (in *TCPProxyInclude) DeepCopy() *TCPProxyInclude {
if in == nil {
return nil
}
out := new(TCPProxyInclude)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLS) DeepCopyInto(out *TLS) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS.
func (in *TLS) DeepCopy() *TLS {
if in == nil {
return nil
}
out := new(TLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TimeoutPolicy) DeepCopyInto(out *TimeoutPolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutPolicy.
func (in *TimeoutPolicy) DeepCopy() *TimeoutPolicy {
if in == nil {
return nil
}
out := new(TimeoutPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpstreamValidation) DeepCopyInto(out *UpstreamValidation) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamValidation.
func (in *UpstreamValidation) DeepCopy() *UpstreamValidation {
if in == nil {
return nil
}
out := new(UpstreamValidation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualHost) DeepCopyInto(out *VirtualHost) {
*out = *in
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLS)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualHost.
func (in *VirtualHost) DeepCopy() *VirtualHost {
if in == nil {
return nil
}
out := new(VirtualHost)
in.DeepCopyInto(out)
return out
}
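
A brief illustration of what these generated helpers buy: pointer and slice fields are duplicated, so mutating a copy never aliases the original. A sketch using the HeadersPolicy type from above (its Remove field is visible in the generated DeepCopyInto):

package main

import (
	"fmt"

	contourv1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
)

func main() {
	orig := &contourv1.HeadersPolicy{Remove: []string{"x-debug"}}
	cp := orig.DeepCopy()
	// The Remove slice was copied, not aliased.
	cp.Remove[0] = "x-trace"
	fmt.Println(orig.Remove[0]) // still "x-debug"
}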

pkg/canary/controller.go (new file)

@@ -0,0 +1,22 @@
package canary
import (
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
type Controller interface {
IsPrimaryReady(canary *flaggerv1.Canary) (bool, error)
IsCanaryReady(canary *flaggerv1.Canary) (bool, error)
GetMetadata(canary *flaggerv1.Canary) (string, map[string]int32, error)
SyncStatus(canary *flaggerv1.Canary, status flaggerv1.CanaryStatus) error
SetStatusFailedChecks(canary *flaggerv1.Canary, val int) error
SetStatusWeight(canary *flaggerv1.Canary, val int) error
SetStatusIterations(canary *flaggerv1.Canary, val int) error
SetStatusPhase(canary *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error
Initialize(canary *flaggerv1.Canary, skipLivenessChecks bool) error
Promote(canary *flaggerv1.Canary) error
HasTargetChanged(canary *flaggerv1.Canary) (bool, error)
HaveDependenciesChanged(canary *flaggerv1.Canary) (bool, error)
Scale(canary *flaggerv1.Canary, replicas int32) error
ScaleFromZero(canary *flaggerv1.Canary) error
}
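
The interface lets the scheduler drive Deployment- and Service-backed canaries through one code path. A hedged sketch of a caller (the helper name and error handling are illustrative, not Flagger's own):

package example

import (
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	"github.com/weaveworks/flagger/pkg/canary"
)

// bootstrapCanary is a hypothetical caller: it initializes the target and
// records the initial status without knowing the concrete target kind.
func bootstrapCanary(ctrl canary.Controller, cd *flaggerv1.Canary) error {
	if err := ctrl.Initialize(cd, true); err != nil {
		return err
	}
	return ctrl.SyncStatus(cd, flaggerv1.CanaryStatus{
		Phase: flaggerv1.CanaryPhaseInitializing,
	})
}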

@@ -6,7 +6,6 @@ import (
"io"
"github.com/google/go-cmp/cmp"
"github.com/mitchellh/hashstructure"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v2beta1"
@@ -21,52 +20,53 @@ import (
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
// Deployer is managing the operations for Kubernetes deployment kind
type Deployer struct {
KubeClient kubernetes.Interface
FlaggerClient clientset.Interface
Logger *zap.SugaredLogger
ConfigTracker ConfigTracker
Labels []string
// DeploymentController is managing the operations for Kubernetes Deployment kind
type DeploymentController struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker ConfigTracker
labels []string
}
// Initialize creates the primary deployment and HPA, and
// scales the canary deployment to zero
func (c *Deployer) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (label string, ports map[string]int32, err error) {
func (c *DeploymentController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
label, ports, err = c.createPrimaryDeployment(cd)
err = c.createPrimaryDeployment(cd)
if err != nil {
return "", ports, fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
}
if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if !skipLivenessChecks {
if !skipLivenessChecks && !cd.Spec.SkipAnalysis {
_, readyErr := c.IsPrimaryReady(cd)
if readyErr != nil {
return "", ports, readyErr
return readyErr
}
}
c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if err := c.Scale(cd, 0); err != nil {
return "", ports, err
return err
}
}
if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
if err := c.reconcilePrimaryHpa(cd, true); err != nil {
return "", ports, fmt.Errorf("creating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
return fmt.Errorf("creating HorizontalPodAutoscaler %s.%s failed: %v", primaryName, cd.Namespace, err)
}
}
return label, ports, nil
return nil
}
// Promote copies the pod spec, secrets and config maps from canary to primary
func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -80,7 +80,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
targetName, cd.Namespace, targetName)
}
primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
@@ -89,11 +89,11 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
}
// promote secrets and config maps
configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return err
}
if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
@@ -104,7 +104,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
primaryCopy.Spec.Strategy = canary.Spec.Strategy
// update spec with primary secrets and config maps
primaryCopy.Spec.Template.Spec = c.ConfigTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
// update pod annotations to ensure a rolling update
annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
@@ -116,7 +116,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)
// apply update
_, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
primaryCopy.GetName(), primaryCopy.Namespace, err)
@@ -132,10 +132,10 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
return nil
}
// HasDeploymentChanged returns true if the canary deployment pod spec has changed
func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
// HasTargetChanged returns true if the canary deployment pod spec has changed
func (c *DeploymentController) HasTargetChanged(cd *flaggerv1.Canary) (bool, error) {
targetName := cd.Spec.TargetRef.Name
canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -143,31 +143,13 @@ func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
if cd.Status.LastAppliedSpec == "" {
return true, nil
}
newHash, err := hashstructure.Hash(canary.Spec.Template, nil)
if err != nil {
return false, fmt.Errorf("hash error %v", err)
}
// do not trigger a canary deployment on manual rollback
if cd.Status.LastPromotedSpec == fmt.Sprintf("%d", newHash) {
return false, nil
}
if cd.Status.LastAppliedSpec != fmt.Sprintf("%d", newHash) {
return true, nil
}
return false, nil
return hasSpecChanged(cd, canary.Spec.Template)
}
// Scale sets the canary deployment replicas
func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
func (c *DeploymentController) Scale(cd *flaggerv1.Canary, replicas int32) error {
targetName := cd.Spec.TargetRef.Name
dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -178,16 +160,16 @@ func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = int32p(replicas)
_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -202,18 +184,18 @@ func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = replicas
_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
// GetMetadata returns the pod label selector and svc ports
func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return "", nil, fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
@@ -236,19 +218,39 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[st
ports = p
}
primaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
return label, ports, nil
}
func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
}
return err
}
label, err := c.getSelectorLabel(canaryDep)
if err != nil {
return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
targetName, cd.Namespace, targetName)
}
primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if errors.IsNotFound(err) {
// create primary secrets and config maps
configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return "", nil, err
return err
}
if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return "", nil, err
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
if err != nil {
return "", nil, err
return err
}
replicas := int32(1)
@@ -289,25 +291,25 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[st
Annotations: annotations,
},
// update spec with the primary secrets and config maps
Spec: c.ConfigTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
},
},
}
_, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
if err != nil {
return "", nil, err
return err
}
c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
}
return label, ports, nil
return nil
}
func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
func (c *DeploymentController) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
hpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
hpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("HorizontalPodAutoscaler %s.%s not found, retrying",
@@ -328,7 +330,7 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
}
primaryHpaName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
primaryHpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})
primaryHpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})
// create HPA
if errors.IsNotFound(err) {
@@ -348,11 +350,11 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
Spec: hpaSpec,
}
_, err = c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
_, err = c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
if err != nil {
return err
}
c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
return nil
}
@@ -370,11 +372,11 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
hpaClone.Spec.MinReplicas = hpaSpec.MinReplicas
hpaClone.Spec.Metrics = hpaSpec.Metrics
_, upErr := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Update(hpaClone)
_, upErr := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Update(hpaClone)
if upErr != nil {
return upErr
}
c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s updated", primaryHpa.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s updated", primaryHpa.GetName(), cd.Namespace)
}
}
@@ -382,7 +384,7 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
}
// makeAnnotations appends a unique ID to the annotations map
func (c *Deployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
func (c *DeploymentController) makeAnnotations(annotations map[string]string) (map[string]string, error) {
idKey := "flagger-id"
res := make(map[string]string)
uuid := make([]byte, 16)
@@ -405,8 +407,8 @@ func (c *Deployer) makeAnnotations(annotations map[string]string) (map[string]st
}
// getSelectorLabel returns the selector match label
func (c *Deployer) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
for _, l := range c.Labels {
func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
for _, l := range c.labels {
if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok {
return l, nil
}
@@ -420,8 +422,12 @@ var sidecars = map[string]bool{
"envoy": true,
}
func (c *DeploymentController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
return c.configTracker.HasConfigChanged(cd)
}
// getPorts returns a list of all container ports
func (c *Deployer) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
func (c *DeploymentController) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
ports := make(map[string]int32)
for _, container := range deployment.Spec.Template.Spec.Containers {
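
Worth noting in this refactor: Initialize no longer returns the selector label and ports; that data now comes from the separate GetMetadata call. A sketch of the resulting two-step flow at a hypothetical call site:

package example

import (
	"fmt"

	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	"github.com/weaveworks/flagger/pkg/canary"
)

// bootstrap fetches routing metadata first, then runs the initialization
// side effects (primary deployment, HPA, scale-to-zero).
func bootstrap(ctrl canary.Controller, cd *flaggerv1.Canary) error {
	label, ports, err := ctrl.GetMetadata(cd)
	if err != nil {
		return err
	}
	fmt.Printf("pod selector %q, container ports %v\n", label, ports)
	// skipLivenessChecks=false: wait for the primary to become ready.
	return ctrl.Initialize(cd, false)
}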

@@ -10,7 +10,7 @@ import (
func TestCanaryDeployer_Sync(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -96,7 +96,7 @@ func TestCanaryDeployer_Sync(t *testing.T) {
func TestCanaryDeployer_IsNewSpec(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -107,7 +107,7 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
t.Fatal(err.Error())
}
isNew, err := mocks.deployer.HasDeploymentChanged(mocks.canary)
isNew, err := mocks.deployer.HasTargetChanged(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -119,7 +119,7 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
func TestCanaryDeployer_Promote(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -185,7 +185,7 @@ func TestCanaryDeployer_Promote(t *testing.T) {
func TestCanaryDeployer_IsReady(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Error("Expected primary readiness check to fail")
}
@@ -203,7 +203,7 @@ func TestCanaryDeployer_IsReady(t *testing.T) {
func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -225,7 +225,7 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
func TestCanaryDeployer_SetState(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -247,7 +247,7 @@ func TestCanaryDeployer_SetState(t *testing.T) {
func TestCanaryDeployer_SyncStatus(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}
@@ -286,7 +286,7 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
func TestCanaryDeployer_Scale(t *testing.T) {
mocks := SetupMocks()
_, _, err := mocks.deployer.Initialize(mocks.canary, true)
err := mocks.deployer.Initialize(mocks.canary, true)
if err != nil {
t.Fatal(err.Error())
}

pkg/canary/factory.go (new file)

@@ -0,0 +1,59 @@
package canary
import (
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
type Factory struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker ConfigTracker
labels []string
}
func NewFactory(kubeClient kubernetes.Interface,
flaggerClient clientset.Interface,
configTracker ConfigTracker,
labels []string,
logger *zap.SugaredLogger) *Factory {
return &Factory{
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
configTracker: configTracker,
labels: labels,
}
}
func (factory *Factory) Controller(kind string) Controller {
deploymentCtrl := &DeploymentController{
logger: factory.logger,
kubeClient: factory.kubeClient,
flaggerClient: factory.flaggerClient,
labels: factory.labels,
configTracker: ConfigTracker{
Logger: factory.logger,
KubeClient: factory.kubeClient,
FlaggerClient: factory.flaggerClient,
},
}
serviceCtrl := &ServiceController{
logger: factory.logger,
kubeClient: factory.kubeClient,
flaggerClient: factory.flaggerClient,
}
switch {
case kind == "Deployment":
return deploymentCtrl
case kind == "Service":
return serviceCtrl
default:
return deploymentCtrl
}
}
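
A sketch of wiring the factory from existing clients; the fallback to the Deployment controller for unknown kinds matches the switch above (the function name is hypothetical):

package example

import (
	"go.uber.org/zap"
	"k8s.io/client-go/kubernetes"

	"github.com/weaveworks/flagger/pkg/canary"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

// controllerFor builds a Factory and resolves the controller for a target
// kind; anything other than "Service" resolves to the Deployment controller.
func controllerFor(kind string, kc kubernetes.Interface, fc clientset.Interface,
	logger *zap.SugaredLogger) canary.Controller {
	tracker := canary.ConfigTracker{
		Logger:        logger,
		KubeClient:    kc,
		FlaggerClient: fc,
	}
	return canary.NewFactory(kc, fc, tracker, []string{"app", "name"}, logger).
		Controller(kind)
}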

@@ -20,7 +20,7 @@ type Mocks struct {
canary *flaggerv1.Canary
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
deployer Deployer
deployer DeploymentController
logger *zap.SugaredLogger
}
@@ -43,12 +43,12 @@ func SetupMocks() Mocks {
logger, _ := logger.NewLogger("debug")
deployer := Deployer{
FlaggerClient: flaggerClient,
KubeClient: kubeClient,
Logger: logger,
Labels: []string{"app", "name"},
ConfigTracker: ConfigTracker{
deployer := DeploymentController{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
labels: []string{"app", "name"},
configTracker: ConfigTracker{
Logger: logger,
KubeClient: kubeClient,
FlaggerClient: flaggerClient,

@@ -14,9 +14,9 @@ import (
// IsPrimaryReady checks the primary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy;
// it will return a non-retriable error if the rolling update is stuck
func (c *Deployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
func (c *DeploymentController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
@@ -39,9 +39,9 @@ func (c *Deployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
// IsCanaryReady checks the canary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy;
// it will return a non-retriable error if the rolling update is stuck
func (c *Deployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
func (c *DeploymentController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
targetName := cd.Spec.TargetRef.Name
canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -64,7 +64,7 @@ func (c *Deployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
// isDeploymentReady determines if a deployment is ready by checking the status conditions;
// if a deployment has exceeded the progress deadline it returns a non-retriable error
func (c *Deployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
func (c *DeploymentController) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
retriable := true
if deployment.Generation <= deployment.Status.ObservedGeneration {
progress := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
@@ -99,7 +99,7 @@ func (c *Deployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int
return true, nil
}
func (c *Deployer) getDeploymentCondition(
func (c *DeploymentController) getDeploymentCondition(
status appsv1.DeploymentStatus,
conditionType appsv1.DeploymentConditionType,
) *appsv1.DeploymentCondition {

@@ -0,0 +1,247 @@
package canary
import (
"fmt"
ex "github.com/pkg/errors"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
// ServiceController is managing the operations for Kubernetes service kind
type ServiceController struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
}
// SetStatusFailedChecks updates the canary failed checks counter
func (c *ServiceController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
return setStatusFailedChecks(c.flaggerClient, cd, val)
}
// SetStatusWeight updates the canary status weight value
func (c *ServiceController) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
return setStatusWeight(c.flaggerClient, cd, val)
}
// SetStatusIterations updates the canary status iterations value
func (c *ServiceController) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
return setStatusIterations(c.flaggerClient, cd, val)
}
// SetStatusPhase updates the canary status phase
func (c *ServiceController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
return setStatusPhase(c.flaggerClient, cd, phase)
}
// GetMetadata is a no-op for services; there is no pod selector or port map to return
func (c *ServiceController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
return "", nil, nil
}
// Initialize creates or updates the primary and canary services in preparation for a canary release that targets a Kubernetes Service
func (c *ServiceController) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (err error) {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryName := fmt.Sprintf("%s-canary", targetName)
svc, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
return err
}
// canary svc
err = c.reconcileCanaryService(cd, canaryName, svc)
if err != nil {
return err
}
// primary svc
err = c.reconcilePrimaryService(cd, primaryName, svc)
if err != nil {
return err
}
return nil
}
func (c *ServiceController) reconcileCanaryService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
current, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return c.createService(canary, name, src)
}
if err != nil {
return fmt.Errorf("service %s query error %v", name, err)
}
new := buildService(canary, name, src)
if new.Spec.Type == "ClusterIP" {
// We can't change this immutable field
new.Spec.ClusterIP = current.Spec.ClusterIP
}
// We can't change this immutable field
new.ObjectMeta.UID = current.ObjectMeta.UID
new.ObjectMeta.ResourceVersion = current.ObjectMeta.ResourceVersion
_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Update(new)
if err != nil {
return err
}
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Service %s.%s updated", new.GetName(), canary.Namespace)
return nil
}
func (c *ServiceController) reconcilePrimaryService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
_, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return c.createService(canary, name, src)
}
if err != nil {
return fmt.Errorf("service %s query error %v", name, err)
}
return nil
}
func (c *ServiceController) createService(canary *flaggerv1.Canary, name string, src *corev1.Service) error {
svc := buildService(canary, name, src)
if svc.Spec.Type == "ClusterIP" {
// Reset and let K8s assign the IP. Otherwise we get an error because the IP is already assigned
svc.Spec.ClusterIP = ""
}
// Let K8s set this. Otherwise K8s API complains with "resourceVersion should not be set on objects to be created"
svc.ObjectMeta.ResourceVersion = ""
_, err := c.kubeClient.CoreV1().Services(canary.Namespace).Create(svc)
if err != nil {
return err
}
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
Infof("Service %s.%s created", svc.GetName(), canary.Namespace)
return nil
}
func buildService(canary *flaggerv1.Canary, name string, src *corev1.Service) *corev1.Service {
svc := src.DeepCopy()
svc.ObjectMeta.Name = name
svc.ObjectMeta.Namespace = canary.Namespace
svc.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(canary, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
}
_, exists := svc.ObjectMeta.Annotations["kubectl.kubernetes.io/last-applied-configuration"]
if exists {
// Leaving this in place means updates from Flagger to this svc never succeed due to a resourceVersion mismatch:
// Operation cannot be fulfilled on services "mysvc-canary": the object has been modified; please apply your changes to the latest version and try again
delete(svc.ObjectMeta.Annotations, "kubectl.kubernetes.io/last-applied-configuration")
}
return svc
}
// Promote copies target's spec from canary to primary
func (c *ServiceController) Promote(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("service %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("service %s.%s query error %v", targetName, cd.Namespace, err)
}
primary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("service %s.%s not found", primaryName, cd.Namespace)
}
return fmt.Errorf("service %s.%s query error %v", primaryName, cd.Namespace, err)
}
primaryCopy := canary.DeepCopy()
primaryCopy.ObjectMeta.Name = primary.ObjectMeta.Name
if primaryCopy.Spec.Type == "ClusterIP" {
primaryCopy.Spec.ClusterIP = primary.Spec.ClusterIP
}
primaryCopy.ObjectMeta.ResourceVersion = primary.ObjectMeta.ResourceVersion
primaryCopy.ObjectMeta.UID = primary.ObjectMeta.UID
// apply update
_, err = c.kubeClient.CoreV1().Services(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating service %s.%s spec failed: %v",
primaryCopy.GetName(), primaryCopy.Namespace, err)
}
return nil
}
// HasTargetChanged returns true if the canary service spec has changed
func (c *ServiceController) HasTargetChanged(cd *flaggerv1.Canary) (bool, error) {
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, fmt.Errorf("service %s.%s not found", targetName, cd.Namespace)
}
return false, fmt.Errorf("service %s.%s query error %v", targetName, cd.Namespace, err)
}
return hasSpecChanged(cd, canary.Spec)
}
// Scale sets the canary deployment replicas
func (c *ServiceController) Scale(cd *flaggerv1.Canary, replicas int32) error {
return nil
}
func (c *ServiceController) ScaleFromZero(cd *flaggerv1.Canary) error {
return nil
}
func (c *ServiceController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
dep, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("service %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
}
return ex.Wrap(err, "SyncStatus service query error")
}
return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec, func(cdCopy *flaggerv1.Canary) {})
}
func (c *ServiceController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
return false, nil
}
func (c *ServiceController) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
return true, nil
}
func (c *ServiceController) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
return true, nil
}

pkg/canary/spec.go (new file)

@@ -0,0 +1,30 @@
package canary
import (
"fmt"
"github.com/mitchellh/hashstructure"
"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
func hasSpecChanged(cd *v1alpha3.Canary, spec interface{}) (bool, error) {
if cd.Status.LastAppliedSpec == "" {
return true, nil
}
newHash, err := hashstructure.Hash(spec, nil)
if err != nil {
return false, fmt.Errorf("hash error %v", err)
}
// do not trigger a canary deployment on manual rollback
if cd.Status.LastPromotedSpec == fmt.Sprintf("%d", newHash) {
return false, nil
}
if cd.Status.LastAppliedSpec != fmt.Sprintf("%d", newHash) {
return true, nil
}
return false, nil
}
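
The hash comparison encodes both change detection and the rollback gate: a spec whose hash equals LastPromotedSpec is treated as a manual rollback and does not start a new analysis. A small self-contained illustration (the struct and values are hypothetical):

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

func main() {
	type podTemplate struct{ Image string }

	oldHash, _ := hashstructure.Hash(podTemplate{Image: "app:1.0"}, nil)
	newHash, _ := hashstructure.Hash(podTemplate{Image: "app:1.1"}, nil)

	lastApplied := fmt.Sprintf("%d", oldHash)
	lastPromoted := fmt.Sprintf("%d", oldHash)

	// Re-applying the promoted spec: detected as a rollback, no new canary.
	fmt.Println(lastPromoted == fmt.Sprintf("%d", oldHash)) // true
	// A genuinely new spec: hash differs from LastAppliedSpec, canary starts.
	fmt.Println(lastApplied != fmt.Sprintf("%d", newHash)) // true
}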

@@ -5,6 +5,7 @@ import (
"github.com/mitchellh/hashstructure"
ex "github.com/pkg/errors"
"github.com/weaveworks/flagger/pkg/client/clientset/versioned"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -14,8 +15,8 @@ import (
)
// SyncStatus encodes the canary pod spec and updates the canary status
func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
func (c *DeploymentController) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
@@ -23,12 +24,18 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatu
return ex.Wrap(err, "SyncStatus deployment query error")
}
configs, err := c.ConfigTracker.GetConfigRefs(cd)
configs, err := c.configTracker.GetConfigRefs(cd)
if err != nil {
return ex.Wrap(err, "SyncStatus configs query error")
}
hash, err := hashstructure.Hash(dep.Spec.Template, nil)
return syncCanaryStatus(c.flaggerClient, cd, status, dep.Spec.Template, func(cdCopy *flaggerv1.Canary) {
cdCopy.Status.TrackedConfigs = configs
})
}
func syncCanaryStatus(flaggerClient versioned.Interface, cd *flaggerv1.Canary, status flaggerv1.CanaryStatus, canaryResource interface{}, setAll func(cdCopy *flaggerv1.Canary)) error {
hash, err := hashstructure.Hash(canaryResource, nil)
if err != nil {
return ex.Wrap(err, "SyncStatus hash error")
}
@@ -37,7 +44,7 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatu
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
@@ -49,13 +56,13 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatu
cdCopy.Status.Iterations = status.Iterations
cdCopy.Status.LastAppliedSpec = fmt.Sprintf("%d", hash)
cdCopy.Status.LastTransitionTime = metav1.Now()
cdCopy.Status.TrackedConfigs = configs
setAll(cdCopy)
if ok, conditions := c.MakeStatusConditions(cd.Status, status.Phase); ok {
if ok, conditions := MakeStatusConditions(cd.Status, status.Phase); ok {
cdCopy.Status.Conditions = conditions
}
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
@@ -66,12 +73,16 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatu
}
// SetStatusFailedChecks updates the canary failed checks counter
func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
func (c *DeploymentController) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
return setStatusFailedChecks(c.flaggerClient, cd, val)
}
func setStatusFailedChecks(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
@@ -80,7 +91,7 @@ func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
cdCopy.Status.FailedChecks = val
cdCopy.Status.LastTransitionTime = metav1.Now()
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
@@ -91,12 +102,16 @@ func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
}
// SetStatusWeight updates the canary status weight value
func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
func (c *DeploymentController) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
return setStatusWeight(c.flaggerClient, cd, val)
}
func setStatusWeight(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
@@ -105,7 +120,7 @@ func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
cdCopy.Status.CanaryWeight = val
cdCopy.Status.LastTransitionTime = metav1.Now()
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
@@ -116,12 +131,16 @@ func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
}
// SetStatusIterations updates the canary status iterations value
func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
func (c *DeploymentController) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
return setStatusIterations(c.flaggerClient, cd, val)
}
func setStatusIterations(flaggerClient versioned.Interface, cd *flaggerv1.Canary, val int) error {
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
@@ -131,7 +150,7 @@ func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
cdCopy.Status.Iterations = val
cdCopy.Status.LastTransitionTime = metav1.Now()
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
@@ -143,12 +162,16 @@ func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
}
// SetStatusPhase updates the canary status phase
func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
func (c *DeploymentController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
return setStatusPhase(c.flaggerClient, cd, phase)
}
func setStatusPhase(flaggerClient versioned.Interface, cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
cd, selErr = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
@@ -167,11 +190,11 @@ func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPh
cdCopy.Status.LastPromotedSpec = cd.Status.LastAppliedSpec
}
if ok, conditions := c.MakeStatusConditions(cdCopy.Status, phase); ok {
if ok, conditions := MakeStatusConditions(cdCopy.Status, phase); ok {
cdCopy.Status.Conditions = conditions
}
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
@@ -181,8 +204,8 @@ func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPh
return nil
}
// GetStatusCondition returns a condition based on type
func (c *Deployer) getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
// getStatusCondition returns a condition based on type
func getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == conditionType {
@@ -193,9 +216,9 @@ func (c *Deployer) getStatusCondition(status flaggerv1.CanaryStatus, conditionTy
}
// MakeStatusConditions updates the canary status conditions based on the canary phase
func (c *Deployer) MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
func MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
phase flaggerv1.CanaryPhase) (bool, []flaggerv1.CanaryCondition) {
currentCondition := c.getStatusCondition(canaryStatus, flaggerv1.PromotedType)
currentCondition := getStatusCondition(canaryStatus, flaggerv1.PromotedType)
message := "New deployment detected, starting initialization."
status := corev1.ConditionUnknown
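
All the setStatus helpers above share one optimistic-concurrency shape: try the update, and on conflict refetch the Canary and retry. A distilled sketch of that loop (the helper name is hypothetical; the calls are as shown in the diff):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

// setWeight retries on write conflicts: refetch the latest Canary, mutate
// a deep copy, and push the status subresource.
func setWeight(client clientset.Interface, cd *flaggerv1.Canary, weight int) error {
	firstTry := true
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if !firstTry {
			var err error
			cd, err = client.FlaggerV1alpha3().Canaries(cd.Namespace).
				Get(cd.GetName(), metav1.GetOptions{})
			if err != nil {
				return err
			}
		}
		cdCopy := cd.DeepCopy()
		cdCopy.Status.CanaryWeight = weight
		cdCopy.Status.LastTransitionTime = metav1.Now()
		_, err := client.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
		firstTry = false
		return err
	})
}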

@@ -19,10 +19,13 @@ limitations under the License.
package versioned
import (
"fmt"
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
projectcontourv1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
@@ -35,6 +38,7 @@ type Interface interface {
FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
GlooV1() gloov1.GlooV1Interface
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
ProjectcontourV1() projectcontourv1.ProjectcontourV1Interface
SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface
}
@@ -46,6 +50,7 @@ type Clientset struct {
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
glooV1 *gloov1.GlooV1Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
projectcontourV1 *projectcontourv1.ProjectcontourV1Client
splitV1alpha1 *splitv1alpha1.SplitV1alpha1Client
}
@@ -69,6 +74,11 @@ func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3In
return c.networkingV1alpha3
}
// ProjectcontourV1 retrieves the ProjectcontourV1Client
func (c *Clientset) ProjectcontourV1() projectcontourv1.ProjectcontourV1Interface {
return c.projectcontourV1
}
// SplitV1alpha1 retrieves the SplitV1alpha1Client
func (c *Clientset) SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface {
return c.splitV1alpha1
@@ -83,9 +93,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
@@ -106,6 +121,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
if err != nil {
return nil, err
}
cs.projectcontourV1, err = projectcontourv1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.splitV1alpha1, err = splitv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
@@ -126,6 +145,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.glooV1 = gloov1.NewForConfigOrDie(c)
cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
cs.projectcontourV1 = projectcontourv1.NewForConfigOrDie(c)
cs.splitV1alpha1 = splitv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
@@ -139,6 +159,7 @@ func New(c rest.Interface) *Clientset {
cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.glooV1 = gloov1.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.projectcontourV1 = projectcontourv1.New(c)
cs.splitV1alpha1 = splitv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
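
End to end, the generated constructors are consumed as below; a minimal sketch assuming in-cluster credentials, with the QPS/Burst values purely illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	"github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	// With QPS set and no RateLimiter, Burst must be > 0 or NewForConfig errors.
	cfg.QPS = 100
	cfg.Burst = 200
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The new typed client is reachable through the added accessor.
	if _, err := cs.ProjectcontourV1().HTTPProxies("default").List(metav1.ListOptions{}); err != nil {
		panic(err)
	}
}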

View File

@@ -28,6 +28,8 @@ import (
fakegloov1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1/fake"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
fakenetworkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
projectcontourv1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1"
fakeprojectcontourv1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1/fake"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
fakesplitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
@@ -104,6 +106,11 @@ func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3In
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
}
// ProjectcontourV1 retrieves the ProjectcontourV1Client
func (c *Clientset) ProjectcontourV1() projectcontourv1.ProjectcontourV1Interface {
return &fakeprojectcontourv1.FakeProjectcontourV1{Fake: &c.Fake}
}
// SplitV1alpha1 retrieves the SplitV1alpha1Client
func (c *Clientset) SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface {
return &fakesplitv1alpha1.FakeSplitV1alpha1{Fake: &c.Fake}
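
The fake clientset makes the new typed client testable without an API server; a sketch assuming the standard client-gen NewSimpleClientset constructor and an illustrative HTTPProxy object:

package fake_test

import (
	"testing"

	projectcontourv1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
	"github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestHTTPProxyFake(t *testing.T) {
	client := fake.NewSimpleClientset()
	proxy := &projectcontourv1.HTTPProxy{
		ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "test"},
	}
	if _, err := client.ProjectcontourV1().HTTPProxies("test").Create(proxy); err != nil {
		t.Fatal(err)
	}
}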

View File

@@ -23,6 +23,7 @@ import (
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
projectcontourv1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -39,6 +40,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
flaggerv1alpha3.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
projectcontourv1.AddToScheme,
splitv1alpha1.AddToScheme,
}
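
The added AddToScheme entry is what lets a runtime.Scheme decode projectcontour.io/v1 objects; a minimal sketch using the generated helper, with error handling illustrative:

// Build a scheme that recognizes HTTPProxy alongside the other API groups.
s := runtime.NewScheme()
if err := AddToScheme(s); err != nil {
	panic(err)
}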

View File

@@ -23,6 +23,7 @@ import (
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
projectcontourv1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -39,6 +40,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
flaggerv1alpha3.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
projectcontourv1.AddToScheme,
splitv1alpha1.AddToScheme,
}

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,140 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
projectcontourv1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeHTTPProxies implements HTTPProxyInterface
type FakeHTTPProxies struct {
Fake *FakeProjectcontourV1
ns string
}
var httpproxiesResource = schema.GroupVersionResource{Group: "projectcontour.io", Version: "v1", Resource: "httpproxies"}
var httpproxiesKind = schema.GroupVersionKind{Group: "projectcontour.io", Version: "v1", Kind: "HTTPProxy"}
// Get takes name of the hTTPProxy, and returns the corresponding hTTPProxy object, and an error if there is any.
func (c *FakeHTTPProxies) Get(name string, options v1.GetOptions) (result *projectcontourv1.HTTPProxy, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(httpproxiesResource, c.ns, name), &projectcontourv1.HTTPProxy{})
if obj == nil {
return nil, err
}
return obj.(*projectcontourv1.HTTPProxy), err
}
// List takes label and field selectors, and returns the list of HTTPProxies that match those selectors.
func (c *FakeHTTPProxies) List(opts v1.ListOptions) (result *projectcontourv1.HTTPProxyList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(httpproxiesResource, httpproxiesKind, c.ns, opts), &projectcontourv1.HTTPProxyList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &projectcontourv1.HTTPProxyList{ListMeta: obj.(*projectcontourv1.HTTPProxyList).ListMeta}
for _, item := range obj.(*projectcontourv1.HTTPProxyList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested hTTPProxies.
func (c *FakeHTTPProxies) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(httpproxiesResource, c.ns, opts))
}
// Create takes the representation of a hTTPProxy and creates it. Returns the server's representation of the hTTPProxy, and an error, if there is any.
func (c *FakeHTTPProxies) Create(hTTPProxy *projectcontourv1.HTTPProxy) (result *projectcontourv1.HTTPProxy, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(httpproxiesResource, c.ns, hTTPProxy), &projectcontourv1.HTTPProxy{})
if obj == nil {
return nil, err
}
return obj.(*projectcontourv1.HTTPProxy), err
}
// Update takes the representation of a hTTPProxy and updates it. Returns the server's representation of the hTTPProxy, and an error, if there is any.
func (c *FakeHTTPProxies) Update(hTTPProxy *projectcontourv1.HTTPProxy) (result *projectcontourv1.HTTPProxy, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(httpproxiesResource, c.ns, hTTPProxy), &projectcontourv1.HTTPProxy{})
if obj == nil {
return nil, err
}
return obj.(*projectcontourv1.HTTPProxy), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeHTTPProxies) UpdateStatus(hTTPProxy *projectcontourv1.HTTPProxy) (*projectcontourv1.HTTPProxy, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(httpproxiesResource, "status", c.ns, hTTPProxy), &projectcontourv1.HTTPProxy{})
if obj == nil {
return nil, err
}
return obj.(*projectcontourv1.HTTPProxy), err
}
// Delete takes name of the hTTPProxy and deletes it. Returns an error if one occurs.
func (c *FakeHTTPProxies) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(httpproxiesResource, c.ns, name), &projectcontourv1.HTTPProxy{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeHTTPProxies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(httpproxiesResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &projectcontourv1.HTTPProxyList{})
return err
}
// Patch applies the patch and returns the patched hTTPProxy.
func (c *FakeHTTPProxies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *projectcontourv1.HTTPProxy, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(httpproxiesResource, c.ns, name, pt, data, subresources...), &projectcontourv1.HTTPProxy{})
if obj == nil {
return nil, err
}
return obj.(*projectcontourv1.HTTPProxy), err
}

View File

@@ -0,0 +1,40 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeProjectcontourV1 struct {
*testing.Fake
}
func (c *FakeProjectcontourV1) HTTPProxies(namespace string) v1.HTTPProxyInterface {
return &FakeHTTPProxies{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeProjectcontourV1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -0,0 +1,21 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
type HTTPProxyExpansion interface{}

View File

@@ -0,0 +1,191 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"time"
v1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// HTTPProxiesGetter has a method to return a HTTPProxyInterface.
// A group's client should implement this interface.
type HTTPProxiesGetter interface {
HTTPProxies(namespace string) HTTPProxyInterface
}
// HTTPProxyInterface has methods to work with HTTPProxy resources.
type HTTPProxyInterface interface {
Create(*v1.HTTPProxy) (*v1.HTTPProxy, error)
Update(*v1.HTTPProxy) (*v1.HTTPProxy, error)
UpdateStatus(*v1.HTTPProxy) (*v1.HTTPProxy, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
Get(name string, options metav1.GetOptions) (*v1.HTTPProxy, error)
List(opts metav1.ListOptions) (*v1.HTTPProxyList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HTTPProxy, err error)
HTTPProxyExpansion
}
// hTTPProxies implements HTTPProxyInterface
type hTTPProxies struct {
client rest.Interface
ns string
}
// newHTTPProxies returns a HTTPProxies
func newHTTPProxies(c *ProjectcontourV1Client, namespace string) *hTTPProxies {
return &hTTPProxies{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the hTTPProxy, and returns the corresponding hTTPProxy object, and an error if there is any.
func (c *hTTPProxies) Get(name string, options metav1.GetOptions) (result *v1.HTTPProxy, err error) {
result = &v1.HTTPProxy{}
err = c.client.Get().
Namespace(c.ns).
Resource("httpproxies").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of HTTPProxies that match those selectors.
func (c *hTTPProxies) List(opts metav1.ListOptions) (result *v1.HTTPProxyList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.HTTPProxyList{}
err = c.client.Get().
Namespace(c.ns).
Resource("httpproxies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested hTTPProxies.
func (c *hTTPProxies) Watch(opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("httpproxies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a hTTPProxy and creates it. Returns the server's representation of the hTTPProxy, and an error, if there is any.
func (c *hTTPProxies) Create(hTTPProxy *v1.HTTPProxy) (result *v1.HTTPProxy, err error) {
result = &v1.HTTPProxy{}
err = c.client.Post().
Namespace(c.ns).
Resource("httpproxies").
Body(hTTPProxy).
Do().
Into(result)
return
}
// Update takes the representation of a hTTPProxy and updates it. Returns the server's representation of the hTTPProxy, and an error, if there is any.
func (c *hTTPProxies) Update(hTTPProxy *v1.HTTPProxy) (result *v1.HTTPProxy, err error) {
result = &v1.HTTPProxy{}
err = c.client.Put().
Namespace(c.ns).
Resource("httpproxies").
Name(hTTPProxy.Name).
Body(hTTPProxy).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *hTTPProxies) UpdateStatus(hTTPProxy *v1.HTTPProxy) (result *v1.HTTPProxy, err error) {
result = &v1.HTTPProxy{}
err = c.client.Put().
Namespace(c.ns).
Resource("httpproxies").
Name(hTTPProxy.Name).
SubResource("status").
Body(hTTPProxy).
Do().
Into(result)
return
}
// Delete takes name of the hTTPProxy and deletes it. Returns an error if one occurs.
func (c *hTTPProxies) Delete(name string, options *metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("httpproxies").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *hTTPProxies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("httpproxies").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched hTTPProxy.
func (c *hTTPProxies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HTTPProxy, err error) {
result = &v1.HTTPProxy{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("httpproxies").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
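
A get-modify-update sketch against this typed client; pc stands in for a ProjectcontourV1Interface, and the namespace, name, and annotation key are illustrative:

proxy, err := pc.HTTPProxies("test").Get("podinfo", metav1.GetOptions{})
if err != nil {
	return err
}
if proxy.Annotations == nil {
	proxy.Annotations = map[string]string{}
}
proxy.Annotations["flagger.app/example"] = "true" // illustrative mutation
if _, err := pc.HTTPProxies("test").Update(proxy); err != nil {
	return err
}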

View File

@@ -0,0 +1,89 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/weaveworks/flagger/pkg/apis/projectcontour/v1"
"github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
type ProjectcontourV1Interface interface {
RESTClient() rest.Interface
HTTPProxiesGetter
}
// ProjectcontourV1Client is used to interact with features provided by the projectcontour.io group.
type ProjectcontourV1Client struct {
restClient rest.Interface
}
func (c *ProjectcontourV1Client) HTTPProxies(namespace string) HTTPProxyInterface {
return newHTTPProxies(c, namespace)
}
// NewForConfig creates a new ProjectcontourV1Client for the given config.
func NewForConfig(c *rest.Config) (*ProjectcontourV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &ProjectcontourV1Client{client}, nil
}
// NewForConfigOrDie creates a new ProjectcontourV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ProjectcontourV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new ProjectcontourV1Client for the given RESTClient.
func New(c rest.Interface) *ProjectcontourV1Client {
return &ProjectcontourV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *ProjectcontourV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}

View File

@@ -29,6 +29,7 @@ import (
gloo "github.com/weaveworks/flagger/pkg/client/informers/externalversions/gloo"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
istio "github.com/weaveworks/flagger/pkg/client/informers/externalversions/istio"
projectcontour "github.com/weaveworks/flagger/pkg/client/informers/externalversions/projectcontour"
smi "github.com/weaveworks/flagger/pkg/client/informers/externalversions/smi"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -180,6 +181,7 @@ type SharedInformerFactory interface {
Flagger() flagger.Interface
Gloo() gloo.Interface
Networking() istio.Interface
Projectcontour() projectcontour.Interface
Split() smi.Interface
}
@@ -199,6 +201,10 @@ func (f *sharedInformerFactory) Networking() istio.Interface {
return istio.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Projectcontour() projectcontour.Interface {
return projectcontour.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Split() smi.Interface {
return smi.New(f, f.namespace, f.tweakListOptions)
}
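
With the factory wired up, consumers can start HTTPProxy informers through the new accessor; a sketch assuming a versioned clientset cs and the generated externalversions package, with the resync interval illustrative:

factory := externalversions.NewSharedInformerFactory(cs, 30*time.Second)
// The generated path mirrors the other groups: group, then version, then resource.
hpInformer := factory.Projectcontour().V1().HTTPProxies().Informer()
stopCh := make(chan struct{})
factory.Start(stopCh)
factory.WaitForCacheSync(stopCh)
_ = hpInformer // event handlers would be registered here in real code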

Some files were not shown because too many files have changed in this diff Show More