Compare commits

...

94 Commits

Author SHA1 Message Date
Stefan Prodan
e7fc72e6b5 Merge pull request #364 from weaveworks/release-0.20.2
Release v0.20.2
2019-11-07 12:08:18 +02:00
stefanprodan
4203232b05 Release v0.20.2 2019-11-07 11:34:25 +02:00
stefanprodan
a06aa05201 Add canary namespace to Linkerd webhooks example 2019-11-07 11:34:00 +02:00
Stefan Prodan
8e582e9b73 Merge pull request #363 from weaveworks/no-hpa
Use the specified replicas when scaling up the canary
2019-11-07 10:44:31 +02:00
stefanprodan
0e9fe8a446 Remove the traffic mention from the custom metrics error log
Fix: #361
2019-11-07 09:36:38 +02:00
stefanprodan
27b4bcc648 Use the specified replicas when scaling up the canary 2019-11-07 09:34:53 +02:00
Stefan Prodan
614b7c74c4 Merge pull request #358 from weaveworks/appmesh-gateway
Expose canaries on public domains with App Mesh Gateway
2019-11-06 13:21:20 +02:00
Stefan Prodan
5901129ec6 Merge pull request #359 from KeisukeYamashita/fix-typo-in-how-it-works
Fix typo in section "Webhook" of how-it-works.md
2019-11-06 13:20:53 +02:00
KeisukeYamashita
ded14345b4 doc(how-it-works): fix typo ca to can in how it works doc 2019-11-05 17:39:45 +09:00
stefanprodan
dd272c6870 Expose canaries on public domains with App Mesh Gateway
- map canary service hosts to domain gateway annotation
- map canary retries and timeout to gateway annotations
2019-11-04 18:26:28 +02:00
Stefan Prodan
b31c7c6230 Merge pull request #356 from weaveworks/docs-cleanup
Docs cleanup
2019-11-04 00:52:47 +02:00
stefanprodan
b0297213c3 Use kustomize in Istio docs 2019-11-04 00:35:28 +02:00
stefanprodan
d0fba2d111 Update Istio SMI tutorial 2019-11-04 00:13:19 +02:00
stefanprodan
9924cc2152 Update NGINX usage docs 2019-11-04 00:12:51 +02:00
Stefan Prodan
008a74f86c Merge pull request #354 from weaveworks/prep-0.20.1
Release v0.20.1
2019-11-03 12:29:14 +02:00
stefanprodan
4ca110292f Add v0.20.1 changelog 2019-11-03 11:57:58 +02:00
stefanprodan
55b4c19670 Release v0.20.1 2019-11-03 11:47:16 +02:00
stefanprodan
8349dd1cda Release load tester v0.11.0
- tools updates: Helm v2.15.1, Helm v3.0.0-rc.2, rimusz helm-tiller v0.9.3, gRPC probe v0.3.1
- add hey test during build
2019-11-03 11:46:18 +02:00
Stefan Prodan
402fb66b2a Merge pull request #353 from weaveworks/fix-promql
Fix Prometheus query escape
2019-11-03 11:04:43 +02:00
stefanprodan
f991274b97 Fix Prometheus query escape
Removing whitespace without trimming spaces
2019-11-03 00:01:32 +02:00
Stefan Prodan
0d94a49b6a Merge pull request #350 from laszlocph/update-hey-link
Updating hey release link
2019-10-30 09:01:56 +02:00
Laszlo Fogas
7c14225442 Updating hey release link 2019-10-30 06:40:57 +01:00
stefanprodan
2af0a050bc Fix Prometheus URL in EKS install docs 2019-10-29 18:32:15 +02:00
Stefan Prodan
582f8d6abd Merge pull request #346 from weaveworks/e2e-up
e2e testing: update providers
2019-10-28 16:26:06 +02:00
stefanprodan
eeea3123ac Update e2e NGINX ingress to v1.24.4 2019-10-28 16:08:00 +02:00
stefanprodan
51fe43e169 Update e2e Helm to v2.15.1 2019-10-28 15:32:02 +02:00
stefanprodan
6e6b127092 Update loadtester Helm to v3.0.0-beta.5 2019-10-28 15:31:17 +02:00
stefanprodan
c9bacdfe05 Update Istio to v1.3.3 2019-10-28 15:19:17 +02:00
stefanprodan
f56a69770c Update Linkerd to v2.6.0 2019-10-28 14:42:16 +02:00
Stefan Prodan
0196124c9f Merge pull request #343 from weaveworks/prep-0.20.0
Release v0.20.0
2019-10-22 19:11:59 +03:00
stefanprodan
63756d9d5f Add changelog for v0.20.0 2019-10-22 17:54:18 +03:00
stefanprodan
8e346960ac Add blue/green service mesh docs 2019-10-22 16:57:49 +03:00
stefanprodan
1b485b3459 Release v0.20.0 2019-10-22 09:39:14 +03:00
Stefan Prodan
ee05108279 Merge pull request #344 from weaveworks/gloo-refactoring
Gloo integration refactoring
2019-10-22 09:38:19 +03:00
stefanprodan
dfaa039c9c Update Gloo docs 2019-10-22 00:48:15 +03:00
stefanprodan
46579d2ee6 Refactor Gloo integration
- build Gloo UpstreamGroup clientset
- drop solo-io, envoyproxy, hcl, consul, opencensus, apiextensions deps
- use the native routers with supergloo
2019-10-21 16:33:47 +03:00
Stefan Prodan
f372523fb8 Merge pull request #342 from weaveworks/prom-config
Implement metrics server override
2019-10-17 17:24:24 +03:00
stefanprodan
5e434df6ea Exclude high cardinality cAdvisor metrics 2019-10-17 13:02:18 +03:00
stefanprodan
d6c5bdd241 Implement metrics server override 2019-10-17 11:37:54 +03:00
stefanprodan
cdcd97244c Add the metrics server field to CRD 2019-10-17 11:36:25 +03:00
Stefan Prodan
60c4bba263 Merge pull request #340 from weaveworks/appmesh-ab-testing
Implement App Mesh A/B testing
2019-10-17 10:54:31 +03:00
stefanprodan
2b73bc5e38 Fix A/B testing examples 2019-10-17 09:12:39 +03:00
stefanprodan
03652dc631 Add App Mesh http match headers tests 2019-10-16 15:43:26 +03:00
stefanprodan
00155aff37 Add App Mesh A/B testing example to docs 2019-10-16 10:49:33 +03:00
stefanprodan
206c3e6d7a Implement App Mesh A/B testing 2019-10-15 16:39:54 +03:00
Stefan Prodan
8345fea812 Merge pull request #338 from weaveworks/appmesh-up
Implement App Mesh HTTP retry policy
2019-10-15 08:45:49 +03:00
stefanprodan
c11dba1e05 Add retry policy to docs and examples 2019-10-14 21:03:57 +03:00
stefanprodan
7d4c3c5814 Implement App Mesh HTTP retry policy 2019-10-14 20:27:48 +03:00
stefanprodan
9b36794c9d Update App Mesh CRD 2019-10-14 20:26:46 +03:00
Stefan Prodan
1f34c656e9 Merge pull request #336 from weaveworks/appmesh-router-fix
Generate unique names for App Mesh virtual routers and routes
2019-10-14 19:25:08 +03:00
stefanprodan
9982dc9c83 Generate unique names for App Mesh virtual routers and routes 2019-10-14 19:07:10 +03:00
Stefan Prodan
780f3d2ab9 Merge pull request #334 from weaveworks/env-vars
Allow setting Slack and Teams URLs with env vars
2019-10-10 09:05:04 +03:00
stefanprodan
1cb09890fb Add env to chart options to be used for Slack and Teams URLs 2019-10-09 16:53:34 +03:00
stefanprodan
faae6a7c3b Add env vars for Slack and Teams URLs 2019-10-09 16:03:30 +03:00
Stefan Prodan
d4250f3248 Merge pull request #333 from weaveworks/default-labels
Add the app/name label to services and primary deployment
2019-10-09 13:45:14 +03:00
stefanprodan
a8ee477b62 Add selector labels option to Helm chart 2019-10-09 13:22:10 +03:00
stefanprodan
673b6102a7 Add the name label to ClusterIP services and primary deployment 2019-10-09 13:01:15 +03:00
Stefan Prodan
316de42a2c Merge pull request #331 from weaveworks/prep-v0.19.0
Release v0.19.0
2019-10-08 13:22:16 +03:00
stefanprodan
dfb4b35e6c Release v0.19.0 2019-10-08 12:02:37 +03:00
Stefan Prodan
61ab596d1b Merge pull request #327 from weaveworks/target-port
Implement canary service target port
2019-10-08 11:10:04 +03:00
stefanprodan
3345692751 Add service target port to docs 2019-10-07 11:56:03 +03:00
stefanprodan
dff9287c75 Add target port to NGINX e2e tests 2019-10-07 10:01:28 +03:00
stefanprodan
b5fb7cdae5 Add target port number to Gloo e2e tests
Update Gloo to v0.20.2
Enable Gloo discovery (fix: #328)
2019-10-07 09:34:23 +03:00
stefanprodan
2e79817437 Add target port number e2e test for Linkerd 2019-10-06 13:35:58 +03:00
stefanprodan
5f439adc36 Use kustomize in Linkerd e2e tests 2019-10-06 12:58:26 +03:00
stefanprodan
45df96ff3c Format imports 2019-10-06 12:54:01 +03:00
stefanprodan
98ee150364 Add target port and gRPC e2e tests for Linkerd 2019-10-06 12:26:03 +03:00
stefanprodan
d328a2146a Fix loadtester image tag 2019-10-06 11:43:25 +03:00
stefanprodan
4513f2e8be Use Docker Hub in e2e tests 2019-10-06 11:42:49 +03:00
stefanprodan
095fef1de6 Release loadtester v0.9.0 with gRPC health check 2019-10-06 11:26:42 +03:00
stefanprodan
754f02a30f Add gRPC acceptance test to Istio e2e tests 2019-10-06 11:03:00 +03:00
stefanprodan
01a4e7f6a8 Add service target port to Istio e2e tests 2019-10-06 11:02:05 +03:00
stefanprodan
6bba84422d Add service target port to Kubernetes e2e tests 2019-10-06 10:44:42 +03:00
stefanprodan
26190d0c6a Use podinfo v3.1.0 for e2e tests 2019-10-06 10:42:30 +03:00
stefanprodan
2d9098e43c Add target port number and name tests 2019-10-06 10:31:50 +03:00
stefanprodan
7581b396b2 Implement service target port 2019-10-06 10:21:34 +03:00
stefanprodan
67a6366906 Add service.targetPort field to Canary CRD 2019-10-06 10:04:21 +03:00
Stefan Prodan
5605fab740 Merge pull request #326 from weaveworks/force-bg
Enforce blue/green when using kubernetes networking
2019-10-05 18:55:13 +03:00
stefanprodan
b76d0001ed Move Istio routing docs to FAQ 2019-10-05 18:13:40 +03:00
stefanprodan
625eed0840 Enforce blue/green when using kubernetes networking
Use blue/green with ten iterations and warn that progressive traffic shifting and HTTP headers routing are not compatible with Kubernetes L4 networking.
2019-10-05 17:59:34 +03:00
stefanprodan
37f9151de3 Add traffic mirroring documentation 2019-10-05 16:23:43 +03:00
Stefan Prodan
20af98e4dc Merge pull request #325 from weaveworks/appmesh-grcp
Allow gRPC protocol for App Mesh
2019-10-05 12:49:07 +03:00
stefanprodan
76800d0ed0 Update canary spec in docs 2019-10-05 12:15:54 +03:00
stefanprodan
3103bde7f7 Use the App Mesh Prometheus chart in docs 2019-10-05 11:52:41 +03:00
stefanprodan
298d8c2d65 Allow gRPC protocol for App Mesh
Use the canary service port name to set http or grpc protocol on App Mesh virtual nodes and virtual routers
2019-10-05 11:21:43 +03:00
Stefan Prodan
5cdacf81e3 Merge pull request #324 from weaveworks/fix-ports-order
Fix port discovery diff
2019-10-05 11:03:35 +03:00
stefanprodan
2141d88ce1 Enable Prometheus scraping of Flagger metrics 2019-10-05 10:45:35 +03:00
stefanprodan
e8a2d4be2e Fix port discovery diff
Sort service ports by port number before comparing slices
2019-10-05 10:42:01 +03:00
Stefan Prodan
9a9baadf0e Merge pull request #311 from andrewjjenkins/mirror
Add traffic mirroring for Istio service mesh
2019-10-05 10:34:25 +03:00
Andrew Jenkins
a21e53fa31 Document traffic mirroring in the FAQ 2019-10-03 14:33:49 -06:00
Andrew Jenkins
61f8aea7d8 add Traffic Mirroring to Blue/Green deployments
Traffic mirroring for blue/green will mirror traffic for the entire
canary analysis phase of the blue/green deployment.
2019-10-03 14:33:49 -06:00
Andrew Jenkins
e384b03d49 Add Traffic Mirroring for Istio Service Mesh
Traffic mirroring is a pre-stage for canary deployments.  When mirroring
is enabled, at the beginning of a canary deployment traffic is mirrored
to the canary instead of shifted for one canary period.  The service
mesh should mirror by copying the request and sending one copy to the
primary and one copy to the canary; only the response from the primary
is sent to the user.  The response from the canary is only used for
collecting metrics.

Once the mirror period is over, the canary proceeds as usual, shifting
traffic from primary to canary until complete.

Added TestScheduler_Mirroring unit test.
2019-10-03 14:33:49 -06:00
Andrew Jenkins
655df36913 Extend test SetupMocks() to take arbitrary Canary resources
SetupMocks() currently takes a bool switch that tells it to configure
against either a shifting canary or an A-B canary.  I'll need a third
canary that has mirroring turned on so I changed this to an interface
that just takes the canary to configure (and configs the default
shifting canary if you pass nil).
2019-09-24 16:15:45 -06:00
Andrew Jenkins
2e079ba7a1 Add mirror to router interface and implement for istio router
The mirror option will be used to tell routers to configure traffic
mirroring.  Implement mirror for GetRoutes and SetRoutes for Istio.  For
other routers, GetRoutes always returns mirror == false, and SetRoutes
ignores mirror.

After this change there is no behavior change because no code sets
mirror true (yet).

Enhanced TestIstioRouter_SetRoutes and TestIstioRouter_GetRoutes.
2019-09-24 16:15:45 -06:00
133 changed files with 3814 additions and 2261 deletions

View File

@@ -10,6 +10,9 @@ jobs:
- restore_cache:
keys:
- go-mod-v3-{{ checksum "go.sum" }}
- run:
name: Run go mod download
command: go mod download
- run:
name: Run go fmt
command: make test-fmt
@@ -100,17 +103,6 @@ jobs:
- run: test/e2e-smi-istio.sh
- run: test/e2e-tests.sh canary
e2e-supergloo-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh 0.2.1
- run: test/e2e-supergloo.sh
- run: test/e2e-tests.sh canary
e2e-gloo-testing:
machine: true
steps:
@@ -203,9 +195,6 @@ workflows:
- e2e-kubernetes-testing:
requires:
- build-binary
# - e2e-supergloo-testing:
# requires:
# - build-binary
- e2e-gloo-testing:
requires:
- build-binary
@@ -220,7 +209,6 @@ workflows:
- build-binary
- e2e-istio-testing
- e2e-kubernetes-testing
#- e2e-supergloo-testing
- e2e-gloo-testing
- e2e-nginx-testing
- e2e-linkerd-testing

View File

@@ -2,6 +2,71 @@
All notable changes to this project are documented in this file.
## 0.20.2 (2019-11-07)
Adds support for exposing canaries outside the cluster using App Mesh Gateway annotations
#### Improvements
- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/weaveworks/flagger/pull/358)
#### Fixes
- Use the specified replicas when scaling up the canary [#363](https://github.com/weaveworks/flagger/pull/363)
## 0.20.1 (2019-11-03)
Fixes promql execution and updates the load testing tools
#### Improvements
- Update load tester Helm tools [#8349dd1](https://github.com/weaveworks/flagger/commit/8349dd1cda59a741c7bed9a0f67c0fc0fbff4635)
- e2e testing: update providers [#346](https://github.com/weaveworks/flagger/pull/346)
#### Fixes
- Fix Prometheus query escape [#353](https://github.com/weaveworks/flagger/pull/353)
- Updating hey release link [#350](https://github.com/weaveworks/flagger/pull/350)
## 0.20.0 (2019-10-21)
Adds support for [A/B Testing](https://docs.flagger.app/usage/progressive-delivery#traffic-mirroring) and retry policies when using App Mesh
#### Features
- Implement App Mesh A/B testing based on HTTP headers match conditions [#340](https://github.com/weaveworks/flagger/pull/340)
- Implement App Mesh HTTP retry policy [#338](https://github.com/weaveworks/flagger/pull/338)
- Implement metrics server override [#342](https://github.com/weaveworks/flagger/pull/342)
#### Improvements
- Add the app/name label to services and primary deployment [#333](https://github.com/weaveworks/flagger/pull/333)
- Allow setting Slack and Teams URLs with env vars [#334](https://github.com/weaveworks/flagger/pull/334)
- Refactor Gloo integration [#344](https://github.com/weaveworks/flagger/pull/344)
#### Fixes
- Generate unique names for App Mesh virtual routers and routes [#336](https://github.com/weaveworks/flagger/pull/336)
## 0.19.0 (2019-10-08)
Adds support for canary and blue/green [traffic mirroring](https://docs.flagger.app/usage/progressive-delivery#traffic-mirroring)
#### Features
- Add traffic mirroring for Istio service mesh [#311](https://github.com/weaveworks/flagger/pull/311)
- Implement canary service target port [#327](https://github.com/weaveworks/flagger/pull/327)
#### Improvements
- Allow gRPC protocol for App Mesh [#325](https://github.com/weaveworks/flagger/pull/325)
- Enforce blue/green when using Kubernetes networking [#326](https://github.com/weaveworks/flagger/pull/326)
#### Fixes
- Fix port discovery diff [#324](https://github.com/weaveworks/flagger/pull/324)
- Helm chart: Enable Prometheus scraping of Flagger metrics [#2141d88](https://github.com/weaveworks/flagger/commit/2141d88ce1cc6be220dab34171c215a334ecde24)
## 0.18.6 (2019-10-03)
Adds support for App Mesh conformance tests and latency metric checks

View File

@@ -6,19 +6,22 @@ RUN addgroup -S app \
WORKDIR /home/app
RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
chmod +x hey && mv hey /usr/local/bin/hey
RUN curl -sSL "https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz" | tar xvz && \
# verify hey works
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
RUN curl -sSL "https://get.helm.sh/helm-v2.15.1-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
rm -rf linux-amd64
RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-beta.3-linux-amd64.tar.gz" | tar xvz && \
RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-rc.2-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
rm -rf linux-amd64
RUN GRPC_HEALTH_PROBE_VERSION=v0.3.0 && \
RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
chmod +x /usr/local/bin/grpc_health_probe
@@ -35,7 +38,7 @@ RUN chown -R app:app ./
USER app
RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.8.3.tar.gz" | tar xvz && \
helm init --client-only && helm plugin install helm-tiller-0.8.3 && helm plugin list
RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.9.3.tar.gz" | tar xvz && \
helm init --client-only && helm plugin install helm-tiller-0.9.3 && helm plugin list
ENTRYPOINT ["./loadtester"]

View File

@@ -39,7 +39,6 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [FAQ](https://docs.flagger.app/faq)
* Usage
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
@@ -70,7 +69,6 @@ metadata:
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
# use the kubernetes provider for Blue/Green style deployments
provider: istio
# deployment reference
targetRef:
@@ -86,14 +84,12 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
# ClusterIP port number
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
# container port name or number (optional)
targetPort: 9898
# port name can be http or grpc (default http)
portName: http
# HTTP match conditions (optional)
match:
- uri:
@@ -101,10 +97,6 @@ spec:
# HTTP rewrite (optional)
rewrite:
uri: /
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
# request timeout (optional)
timeout: 5s
# promote the canary without analysing it (default false)
@@ -144,7 +136,7 @@ spec:
topic="podinfo"
}[1m]
)
# external checks (optional)
# testing (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
@@ -157,15 +149,17 @@ For more details on how the canary analysis and promotion works please [read the
## Features
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies filters) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo | Kubernetes CNI |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
## Roadmap

View File

@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: abtest
namespace: test
labels:
app: abtest
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: abtest
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: abtest
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: abtest
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -20,8 +20,16 @@ spec:
service:
# container port
port: 9898
# container port name (optional)
# can be http or grpc
portName: http
# App Mesh reference
meshName: global
# App Mesh retry policy (optional)
retries:
attempts: 3
perTryTimeout: 1s
retryOn: "gateway-error,client-error,stream-error"
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)

View File

@@ -1,14 +1,14 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
@@ -16,7 +16,7 @@ spec:
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
name: podinfo
service:
# container port
port: 9898
@@ -26,7 +26,12 @@ spec:
- mesh
# Istio virtual service host names (optional)
hosts:
- abtest.istio.weavedx.com
- app.example.com
# Istio traffic policy (optional)
trafficPolicy:
tls:
# use ISTIO_MUTUAL when mTLS is enabled
mode: DISABLE
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
@@ -36,12 +41,12 @@ spec:
iterations: 10
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome)(?=.*\bSafari\b).*$"
- headers:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
- headers:
user-agent:
regex: "(?=.*Safari)(?!.*Chrome).*$"
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)

View File

@@ -37,7 +37,7 @@ spec:
- mesh
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
- app.example.com
# Istio traffic policy (optional)
trafficPolicy:
tls:

View File

@@ -20,12 +20,13 @@ spec:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9898"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: stefanprodan/podinfo:2.0.0
image: stefanprodan/podinfo:3.1.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -41,6 +41,10 @@ spec:
type: string
JSONPath: .spec.canaryAnalysis.interval
priority: 1
- name: Mirror
type: boolean
JSONPath: .spec.canaryAnalysis.mirror
priority: 1
- name: StepWeight
type: string
JSONPath: .spec.canaryAnalysis.stepWeight
@@ -64,6 +68,9 @@ spec:
provider:
description: Traffic management provider
type: string
metricsServer:
description: Prometheus URL
type: string
progressDeadlineSeconds:
description: Deployment progress deadline
type: number
@@ -114,6 +121,11 @@ spec:
portName:
description: Container port name
type: string
targetPort:
description: Container target port name or number
anyOf:
- type: string
- type: number
portDiscovery:
description: Enable port discovery
type: boolean
@@ -183,6 +195,9 @@ spec:
stepWeight:
description: Canary incremental traffic percentage step
type: number
mirror:
description: Mirror traffic to canary before shifting
type: boolean
match:
description: A/B testing match conditions
anyOf:

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.18.6
image: weaveworks/flagger:0.20.2
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -4,6 +4,7 @@ metadata:
name: podinfo
namespace: test
spec:
provider: gloo
targetRef:
apiVersion: apps/v1
kind: Deployment
@@ -28,9 +29,24 @@ spec:
threshold: 500
interval: 30s
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 10s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
- name: gloo-acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 10s
metadata:
type: bash
cmd: "curl -sd 'test' -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://gloo.example.com/"
cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
logCmdOutput: "true"

View File

@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 1
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -7,11 +7,11 @@ spec:
virtualHost:
domains:
- '*'
name: podinfo.default
name: podinfo
routes:
- matcher:
prefix: /
routeAction:
upstreamGroup:
name: podinfo
namespace: gloo
namespace: test

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.8.0
image: weaveworks/flagger-loadtester:0.11.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -23,8 +23,10 @@ spec:
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
# ClusterIP port number
port: 80
# container port number or name
targetPort: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s

View File

@@ -1,69 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: green
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
resources:
limits:
cpu: 1000m
memory: 256Mi
requests:
cpu: 100m
memory: 16Mi

View File

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -1,131 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: trafficsplits.split.smi-spec.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.service
description: The service
name: Service
type: string
group: split.smi-spec.io
names:
kind: TrafficSplit
listKind: TrafficSplitList
plural: trafficsplits
singular: trafficsplit
scope: Namespaced
subresources:
status: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: smi-adapter-istio
namespace: istio-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: smi-adapter-istio
rules:
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- apps
resourceNames:
- smi-adapter-istio
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- split.smi-spec.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: smi-adapter-istio
subjects:
- kind: ServiceAccount
name: smi-adapter-istio
namespace: istio-system
roleRef:
kind: ClusterRole
name: smi-adapter-istio
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: smi-adapter-istio
namespace: istio-system
spec:
replicas: 1
selector:
matchLabels:
name: smi-adapter-istio
template:
metadata:
labels:
name: smi-adapter-istio
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccountName: smi-adapter-istio
containers:
- name: smi-adapter-istio
image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
command:
- smi-adapter-istio
imagePullPolicy: Always
env:
- name: WATCH_NAMESPACE
value: ""
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "smi-adapter-istio"

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
version: 0.18.6
appVersion: 0.18.6
version: 0.20.2
appVersion: 0.20.2
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.

View File

@@ -68,6 +68,7 @@ Parameter | Description | Default
`image.pullPolicy` | image pull policy | `IfNotPresent`
`prometheus.install` | if `true`, installs Prometheus configured to scrape all pods in the cluster including the App Mesh sidecar | `false`
`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
`selectorLabels` | list of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`

View File

@@ -42,6 +42,10 @@ spec:
type: string
JSONPath: .spec.canaryAnalysis.interval
priority: 1
- name: Mirror
type: boolean
JSONPath: .spec.canaryAnalysis.mirror
priority: 1
- name: StepWeight
type: string
JSONPath: .spec.canaryAnalysis.stepWeight
@@ -65,6 +69,9 @@ spec:
provider:
description: Traffic management provider
type: string
metricsServer:
description: Prometheus URL
type: string
progressDeadlineSeconds:
description: Deployment progress deadline
type: number
@@ -115,6 +122,11 @@ spec:
portName:
description: Container port name
type: string
targetPort:
description: Container target port name or number
anyOf:
- type: string
- type: number
portDiscovery:
description: Enable port discovery
type: boolean
@@ -184,6 +196,9 @@ spec:
stepWeight:
description: Canary incremental traffic percentage step
type: number
mirror:
description: Mirror traffic to canary before shifting
type: boolean
match:
description: A/B testing match conditions
anyOf:

View File

@@ -61,6 +61,9 @@ spec:
{{- else }}
- -metrics-server={{ .Values.metricsServer }}
{{- end }}
{{- if .Values.selectorLabels }}
- -selector-labels={{ .Values.selectorLabels }}
{{- end }}
{{- if .Values.namespace }}
- -namespace={{ .Values.namespace }}
{{- end }}
@@ -99,6 +102,10 @@ spec:
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
{{- if .Values.env }}
env:
{{ toYaml .Values.env | indent 12 }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}

View File

@@ -133,38 +133,22 @@ data:
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https
# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
@@ -174,6 +158,14 @@ data:
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [__name__]
regex: (container|machine)_(cpu|memory|network|fs)_(.+)
action: keep
- source_labels: [__name__]
regex: container_memory_failures_total
action: drop
# scrape config for pods
- job_name: kubernetes-pods

View File

@@ -2,20 +2,26 @@
image:
repository: weaveworks/flagger
tag: 0.18.6
tag: 0.20.2
pullPolicy: IfNotPresent
pullSecret:
podAnnotations: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
metricsServer: "http://prometheus:9090"
# accepted values are istio, appmesh, nginx or supergloo:mesh.namespace (defaults to istio)
# accepted values are kubernetes, istio, linkerd, appmesh, nginx, gloo or supergloo:mesh.namespace (defaults to istio)
meshProvider: ""
# single namespace restriction
namespace: ""
# list of pod labels that Flagger uses to create pod selectors
# defaults to: app,name,app.kubernetes.io/name
selectorLabels: ""
slack:
user: flagger
channel:
@@ -26,6 +32,19 @@ msteams:
# MS Teams incoming webhook URL
url:
#env:
#- name: SLACK_URL
# valueFrom:
# secretKeyRef:
# name: slack
# key: url
#- name: MSTEAMS_URL
# valueFrom:
# secretKeyRef:
# name: msteams
# key: url
env: []
leaderElection:
enabled: false
replicaCount: 1

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: loadtester
version: 0.8.0
appVersion: 0.8.0
version: 0.11.0
appVersion: 0.11.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.

View File

@@ -2,10 +2,12 @@ replicaCount: 1
image:
repository: weaveworks/flagger-loadtester
tag: 0.8.0
tag: 0.11.0
pullPolicy: IfNotPresent
podAnnotations: {}
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
logLevel: info
cmd:

View File

@@ -286,10 +286,10 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
provider := "slack"
notifierURL := slackURL
if msteamsURL != "" {
notifierURL := fromEnv("SLACK_URL", slackURL)
if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
provider = "msteams"
notifierURL = msteamsURL
notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
}
notifierFactory := notifier.NewFactory(notifierURL, slackUser, slackChannel)
@@ -304,3 +304,10 @@ func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
}
return
}
func fromEnv(envVar string, defaultVal string) string {
if os.Getenv(envVar) != "" {
return os.Getenv(envVar)
}
return defaultVal
}

View File

@@ -10,7 +10,7 @@ import (
"time"
)
var VERSION = "0.8.0"
var VERSION = "0.11.0"
var (
logLevel string
port string

Binary file not shown (image updated: 158 KiB before → 30 KiB after).

Binary file not shown (image added: 39 KiB).

View File

@@ -7,10 +7,12 @@
Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
* Canary (progressive traffic shifting)
* Istio, Linkerd, App Mesh, NGINX, Gloo
* Canary (traffic mirroring)
* Istio
* A/B Testing (HTTP headers and cookies traffic routing)
* Istio, NGINX
* Istio, App Mesh, NGINX
* Blue/Green (traffic switch)
* Kubernetes CNI
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Gloo
For Canary deployments and A/B testing you'll need a Layer 7 traffic management solution like a service mesh or an ingress controller.
For Blue/Green deployments no service mesh or ingress controller is required.
@@ -41,6 +43,21 @@ Istio example:
regex: "^(.*?;)?(canary=always)(;.*)?$"
```
App Mesh example:
```yaml
canaryAnalysis:
interval: 1m
threshold: 10
iterations: 2
match:
- headers:
user-agent:
regex: ".*Chrome.*"
```
Note that App Mesh supports a single condition.
NGINX example:
```yaml
@@ -102,6 +119,42 @@ The above configuration will run an analysis for five minutes.
Flagger starts the load test for the canary service (green version) and checks the Prometheus metrics every 30 seconds.
If the analysis result is positive, Flagger will promote the canary (green version) to primary (blue version).
**When can I use traffic mirroring?**
Traffic Mirroring is a pre-stage in a Canary (progressive traffic shifting) or
Blue/Green deployment strategy. Traffic mirroring will copy each incoming
request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user. The response from the canary
is discarded. Metrics are collected on both requests so that the deployment will
only proceed if the canary metrics are healthy.
Mirroring is supported by Istio only.
In Istio, mirrored requests have `-shadow` appended to the `Host` (HTTP) or
`Authority` (HTTP/2) header; for example requests to `podinfo.test` that are
mirrored will be reported in telemetry with a destination host `podinfo.test-shadow`.
Mirroring must only be used for requests that are **idempotent** or capable of
being processed twice (once by the primary and once by the canary). Reads are
idempotent. Before using mirroring on requests that may be writes, you should
consider what will happen if a write is duplicated and handled by the primary
and canary.
To use mirroring, set `spec.canaryAnalysis.mirror` to `true`. Example for
traffic shifting:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
spec:
provider: istio
canaryAnalysis:
mirror: true
interval: 30s
stepWeight: 20
maxWeight: 50
```
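Mirroring can be combined with the Blue/Green strategy in the same way, by using `iterations` instead of `stepWeight`/`maxWeight`. A minimal sketch, reusing the Istio provider and the analysis fields shown elsewhere in this changeset:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
spec:
  provider: istio
  canaryAnalysis:
    # mirror traffic to the canary for the whole analysis
    mirror: true
    # schedule interval (default 60s)
    interval: 1m
    # total number of iterations
    iterations: 10
    # max number of failed iterations before rollback
    threshold: 2
```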
### Kubernetes services
**How is an application exposed inside the cluster?**
@@ -120,8 +173,10 @@ spec:
kind: Deployment
name: podinfo
service:
# container port (required)
# ClusterIP port number (required)
port: 9898
# container port name or number
targetPort: http
# port name can be http or grpc (default http)
portName: http
```
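For the spec above, the generated ClusterIP services map the `port` value to the container port referenced by `targetPort`. A trimmed-down sketch of what the canary service could look like, modeled on the `backend-primary` example further down (the exact labels and owner references are omitted):
```yaml
apiVersion: v1
kind: Service
metadata:
  name: podinfo-canary
  namespace: test
spec:
  type: ClusterIP
  ports:
  - name: http
    # ClusterIP port taken from service.port
    port: 9898
    protocol: TCP
    # resolved from service.targetPort (container port name or number)
    targetPort: http
  selector:
    app: podinfo-canary
```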
@@ -291,6 +346,195 @@ spec:
topologyKey: kubernetes.io/hostname
```
### Istio routing
**How does Flagger interact with Istio?**
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh.
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: frontend
namespace: test
spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-frontend
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- frontend.example.com
# Istio traffic policy
trafficPolicy:
tls:
# use ISTIO_MUTUAL when mTLS is enabled
mode: DISABLE
# HTTP match conditions (optional)
match:
- uri:
prefix: /
# HTTP rewrite (optional)
rewrite:
uri: /
# Istio retry policy (optional)
retries:
attempts: 3
perTryTimeout: 1s
retryOn: "gateway-error,connect-failure,refused-stream"
# Add headers (optional)
headers:
request:
add:
x-some-header: "value"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
allowMethods:
- GET
allowCredentials: false
allowHeaders:
- x-some-header
maxAge: 24h
```
For the above spec Flagger will generate the following virtual service:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: frontend
namespace: test
ownerReferences:
- apiVersion: flagger.app/v1alpha3
blockOwnerDeletion: true
controller: true
kind: Canary
name: podinfo
uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- frontend.example.com
- frontend
http:
- appendHeaders:
x-some-header: "value"
corsPolicy:
allowHeaders:
- x-some-header
allowMethods:
- GET
allowOrigin:
- example.com
maxAge: 24h
match:
- uri:
prefix: /
rewrite:
uri: /
route:
- destination:
host: podinfo-primary
weight: 100
- destination:
host: podinfo-canary
weight: 0
retries:
attempts: 3
perTryTimeout: 1s
retryOn: "gateway-error,connect-failure,refused-stream"
```
For each destination in the virtual service a rule is generated:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: frontend-primary
namespace: test
spec:
host: frontend-primary
trafficPolicy:
tls:
mode: DISABLE
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: frontend-canary
namespace: test
spec:
host: frontend-canary
trafficPolicy:
tls:
mode: DISABLE
```
Flagger keeps the virtual service and destination rules in sync with the canary service spec.
Any direct modification to the virtual service spec will be overwritten.
To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
the service spec can contain only the container port and the traffic policy:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: backend
namespace: test
spec:
service:
port: 9898
trafficPolicy:
tls:
mode: DISABLE
```
Based on the above spec, Flagger will create several ClusterIP services like:
```yaml
apiVersion: v1
kind: Service
metadata:
name: backend-primary
ownerReferences:
- apiVersion: flagger.app/v1alpha3
blockOwnerDeletion: true
controller: true
kind: Canary
name: backend
uid: 2ca1a9c7-2ef6-11e9-bd01-42010a9c0145
spec:
type: ClusterIP
ports:
- name: http
port: 9898
protocol: TCP
targetPort: 9898
selector:
app: backend-primary
```
Flagger works for user facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.
### Istio Ingress Gateway
**How can I expose multiple canaries on the same external domain?**

View File

@@ -4,8 +4,6 @@
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services, virtual service, traffic split or ingress\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -19,8 +17,7 @@ metadata:
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
# use the kubernetes provider for Blue/Green style deployments
provider: istio
provider: linkerd
# deployment reference
targetRef:
apiVersion: apps/v1
@@ -35,16 +32,15 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
# ClusterIP port number
port: 9898
# service port name (optional, will default to "http")
portName: http-podinfo
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
# ClusterIP port name can be http or grpc (default http)
portName: http
# container port number or name (optional)
targetPort: 9898
# add all the other container ports
# to the ClusterIP services (default false)
portDiscovery: false
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs
@@ -71,15 +67,13 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
# testing (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
# key-value pairs (optional)
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
@@ -102,8 +96,8 @@ spec:
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Services.
The container port from the target deployment should match the `service.port` or `service.targetPort`.
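To illustrate both constraints, here is a trimmed-down sketch of a target deployment, based on the podinfo manifests in this changeset, with a single `app` label selector and a container port matching the Canary `service.port`/`service.targetPort`:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
spec:
  selector:
    # single label selector in the app: <DEPLOYMENT-NAME> format
    matchLabels:
      app: podinfo
  template:
    metadata:
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: stefanprodan/podinfo:3.1.0
        ports:
        # must match service.port or service.targetPort in the Canary spec
        - containerPort: 9898
          name: http
          protocol: TCP
```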
### Canary status
@@ -173,184 +167,6 @@ kubectl wait canary/podinfo --for=condition=promoted --timeout=5m
kubectl get canary/podinfo | grep Succeeded
```
### Istio routing
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh.
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: frontend
namespace: test
spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-frontend
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- frontend.example.com
# Istio traffic policy (optional)
trafficPolicy:
loadBalancer:
simple: LEAST_CONN
# HTTP match conditions (optional)
match:
- uri:
prefix: /
# HTTP rewrite (optional)
rewrite:
uri: /
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
allowMethods:
- GET
allowCredentials: false
allowHeaders:
- x-some-header
maxAge: 24h
```
For the above spec Flagger will generate the following virtual service:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: frontend
namespace: test
ownerReferences:
- apiVersion: flagger.app/v1alpha3
blockOwnerDeletion: true
controller: true
kind: Canary
name: podinfo
uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- frontend.example.com
- frontend
http:
- appendHeaders:
x-envoy-max-retries: "10"
x-envoy-retry-on: gateway-error,connect-failure,refused-stream
x-envoy-upstream-rq-timeout-ms: "15000"
corsPolicy:
allowHeaders:
- x-some-header
allowMethods:
- GET
allowOrigin:
- example.com
maxAge: 24h
match:
- uri:
prefix: /
rewrite:
uri: /
route:
- destination:
host: podinfo-primary
weight: 100
- destination:
host: podinfo-canary
weight: 0
```
For each destination in the virtual service a rule is generated:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: frontend-primary
namespace: test
spec:
host: frontend-primary
trafficPolicy:
loadBalancer:
simple: LEAST_CONN
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: frontend-canary
namespace: test
spec:
host: frontend-canary
trafficPolicy:
loadBalancer:
simple: LEAST_CONN
```
Flagger keeps in sync the virtual service and destination rules with the canary service spec.
Any direct modification to the virtual service spec will be overwritten.
To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
the service spec can contain only the container port:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: backend
namespace: test
spec:
service:
port: 9898
```
Based on the above spec, Flagger will create several ClusterIP services like:
```yaml
apiVersion: v1
kind: Service
metadata:
name: backend-primary
ownerReferences:
- apiVersion: flagger.app/v1alpha3
blockOwnerDeletion: true
controller: true
kind: Canary
name: backend
uid: 2ca1a9c7-2ef6-11e9-bd01-42010a9c0145
spec:
type: ClusterIP
ports:
- name: http
port: 9898
protocol: TCP
targetPort: 9898
selector:
app: backend-primary
```
Flagger works for user facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.
### Canary Stages
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
@@ -488,6 +304,45 @@ interval * threshold
Make sure that the analysis threshold is lower than the number of iterations.
### Blue/Green deployments
For applications that are not deployed on a service mesh, Flagger can orchestrate blue/green style deployments
with Kubernetes L4 networking. When using Istio you have the option to mirror traffic between blue and green.
You can use the blue/green deployment strategy by replacing `stepWeight/maxWeight` with `iterations` in the `canaryAnalysis` spec:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# total number of iterations
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# Traffic shadowing (compatible with Istio only)
mirror: true
```
With the above configuration Flagger will run conformance and load tests on the canary pods for ten minutes.
If the metrics analysis succeeds, live traffic will be switched from the old version to the new one when the
canary is promoted.
The blue/green deployment strategy is supported for all service mesh providers.
Blue/Green rollout steps for service mesh:
* scale up the canary (green)
* run conformance tests for the canary pods
* run load tests and metric checks for the canary pods
* route traffic to canary
* promote canary spec over primary (blue)
* wait for primary rollout
* route traffic to primary
* scale down canary
After the analysis finishes, traffic is routed to the canary (green) before triggering the primary (blue)
rolling update; this ensures a smooth transition to the new version and avoids dropping in-flight requests during
the Kubernetes deployment rollout.
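For the Kubernetes L4 case, the provider is set on the Canary resource itself. A minimal sketch assembled from fields shown in this changeset (the `kubernetes` provider combined with the iteration-based analysis above):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # use the kubernetes provider for Blue/Green style deployments
  provider: kubernetes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    # ClusterIP port number
    port: 9898
  canaryAnalysis:
    interval: 1m
    # total number of iterations
    iterations: 10
    # max number of failed iterations before rollback
    threshold: 2
```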
### HTTP Metrics
The canary analysis is using the following Prometheus queries:
@@ -531,7 +386,7 @@ sum(
)
```
App Mesh query:
Envoy query (App Mesh or Gloo):
```javascript
sum(
@@ -539,7 +394,7 @@ sum(
envoy_cluster_upstream_rq{
kubernetes_namespace="$namespace",
kubernetes_pod_name=~"$workload",
response_code!~"5.*"
envoy_response_code!~"5.*"
}[$interval]
)
)
@@ -584,7 +439,7 @@ histogram_quantile(0.99,
)
```
App Mesh query:
Envoy query (App Mesh or Gloo):
```javascript
histogram_quantile(0.99,
@@ -680,7 +535,7 @@ The canary analysis can be extended with webhooks. Flagger will call each webhoo
determine from the response status code (HTTP 2xx) if the canary is failing or not.
There are three types of hooks:
* Confirm-rollout hooks are executed before scaling up the canary deployment and ca be used for manual approval.
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
The rollout is paused until the hook returns a successful HTTP status code.
* Pre-rollout hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reach the

View File

@@ -17,8 +17,7 @@ The App Mesh integration with EKS is made out of the following components:
### Create a Kubernetes cluster
In order to create an EKS cluster you can use [eksctl](https://eksctl.io).
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon,
its a Kubernetes-native tool written in Go.
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon.
On MacOS you can install eksctl with Homebrew:
@@ -114,7 +113,7 @@ Install the App Mesh CRD controller:
```sh
helm upgrade -i appmesh-controller eks/appmesh-controller \
--wait --namespace appmesh-system
--wait --namespace appmesh-system --version 0.2.0
```
Install the App Mesh admission controller:
@@ -137,7 +136,17 @@ Status:
Type: MeshActive
```
### Install Flagger, Prometheus and Grafana
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to set up a Prometheus instance to scrape the Envoy sidecars.
Install the App Mesh Prometheus:
```sh
helm upgrade -i appmesh-prometheus eks/appmesh-prometheus \
--wait --namespace appmesh-system
```
### Install Flagger and Grafana
Add Flagger Helm repository:
@@ -151,20 +160,17 @@ Install Flagger's Canary CRD:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:
Deploy Flagger in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set prometheus.install=true
--set metricsServer=http://appmesh-prometheus:9090
```
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to set up a Prometheus instance to scrape the Envoy sidecars.
You can enable **Slack** notifications with:
You can enable Slack or MS Teams notifications with:
```bash
helm upgrade -i flagger flagger/flagger \
@@ -181,7 +187,7 @@ Deploy Grafana in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://flagger-prometheus.appmesh-system:9090
--set url=http://appmesh-prometheus:9090
```
You can access Grafana using port forwarding:


@@ -4,42 +4,16 @@ This guide shows you how to use the SMI Istio adapter and Flagger to automate ca
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* Kubernetes > 1.13
* Istio > 1.0
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
### Install Istio SMI adapter
Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.
A minimal Istio installation should contain the following services:
* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus
### Install Istio and the SMI adapter
Add Istio Helm repository:
Install the SMI adapter:
```bash
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
```
Install Istio CRDs:
```bash
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
```
Install Istio:
```bash
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/crds/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/operator-and-rbac.yaml
```
Create a generic Istio gateway to expose services outside the mesh on HTTP:
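A minimal sketch of such a gateway (it matches the `public-gateway.istio-system` referenced by the canary definitions; adjust the hosts for your environment):
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
```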
@@ -74,14 +48,6 @@ Find the Gateway load balancer IP and add a DNS record for it:
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```
Install the SMI adapter:
```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
```
### Install Flagger and Grafana
Add Flagger Helm repository:
@@ -95,7 +61,6 @@ Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set image.tag=master-12d84b2 \
--set meshProvider=smi:istio
```
@@ -119,24 +84,23 @@ kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
Create a test namespace and enable Linkerd proxy injection:
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```bash
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a canary custom resource (replace example.com with your own domain):
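A minimal sketch of such a canary resource, modeled on the Istio examples elsewhere in these docs (the host name and analysis values are placeholders and some fields may not apply to every mesh provider):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
      - app.example.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    maxWeight: 50
    # canary increment step
    stepWeight: 5
    metrics:
      - name: request-success-rate
        threshold: 99
        interval: 1m
      - name: request-duration
        threshold: 500
        interval: 1m
```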
@@ -236,7 +200,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=quay.io/stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -287,7 +251,7 @@ Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
--image=quay.io/stefanprodan/podinfo:3.1.2 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh


@@ -13,23 +13,20 @@ This is particularly useful for frontend applications that require session affin
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/ab-testing/deployment.yaml
kubectl apply -f ${REPO}/artifacts/ab-testing/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a canary custom resource (replace example.com with your own domain):
@@ -38,14 +35,14 @@ Create a canary custom resource (replace example.com with your own domain):
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
@@ -53,7 +50,7 @@ spec:
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
name: podinfo
service:
# container port
port: 9898
@@ -63,6 +60,11 @@ spec:
# Istio virtual service host names (optional)
hosts:
- app.example.com
# Istio traffic policy (optional)
trafficPolicy:
tls:
# use ISTIO_MUTUAL when mTLS is enabled
mode: DISABLE
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
@@ -110,19 +112,19 @@ After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/abtest
horizontalpodautoscaler.autoscaling/abtest
canary.flagger.app/abtest
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/abtest-primary
horizontalpodautoscaler.autoscaling/abtest-primary
service/abtest
service/abtest-canary
service/abtest-primary
destinationrule.networking.istio.io/abtest-canary
destinationrule.networking.istio.io/abtest-primary
virtualservice.networking.istio.io/abtest
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
destinationrule.networking.istio.io/podinfo-canary
destinationrule.networking.istio.io/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
### Automated canary promotion
@@ -131,7 +133,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/abtest \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -145,22 +147,22 @@ Status:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected abtest.test
Normal Synced 3m flagger Scaling up abtest.test
Warning Synced 3m flagger Waiting for abtest.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 2m flagger Advance abtest.test canary iteration 4/10
Normal Synced 2m flagger Advance abtest.test canary iteration 5/10
Normal Synced 1m flagger Advance abtest.test canary iteration 6/10
Normal Synced 1m flagger Advance abtest.test canary iteration 7/10
Normal Synced 55s flagger Advance abtest.test canary iteration 8/10
Normal Synced 45s flagger Advance abtest.test canary iteration 9/10
Normal Synced 35s flagger Advance abtest.test canary iteration 10/10
Normal Synced 25s flagger Copying abtest.test template spec to abtest-primary.test
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 4/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 5/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 6/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 7/10
Normal Synced 55s flagger Advance podinfo.test canary iteration 8/10
Normal Synced 45s flagger Advance podinfo.test canary iteration 9/10
Normal Synced 35s flagger Advance podinfo.test canary iteration 10/10
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down abtest.test
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
@@ -204,12 +206,12 @@ Status:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for abtest.test
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 3m flagger Halt abtest.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt abtest.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back abtest.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down abtest.test
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back podinfo.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```


@@ -67,11 +67,19 @@ spec:
service:
# container port
port: 9898
# container port name (optional)
# can be http or grpc
portName: http
# App Mesh reference
meshName: global
# App Mesh egress (optional)
backends:
- backend.test
# App Mesh retry policy (optional)
retries:
attempts: 3
perTryTimeout: 1s
retryOn: "gateway-error,client-error,stream-error"
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
@@ -98,18 +106,18 @@ spec:
interval: 30s
# testing (optional)
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
@@ -312,3 +320,74 @@ If you've enabled the Slack notifications, you'll receive a message if the p
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
```yaml
canaryAnalysis:
interval: 1m
threshold: 10
iterations: 10
match:
- headers:
x-canary:
exact: "insider"
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'X-Canary: insider' http://podinfo.test:9898/"
```
The above configuration will run an analysis for ten minutes targeting users that have an `X-Canary: insider` header.
You can also use an HTTP cookie; to target all users with a `canary` cookie set to `insider`, the match condition should be:
```yaml
match:
- headers:
cookie:
regex: "^(.*?;)?(canary=insider)(;.*)?$"
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=insider' http://podinfo.test:9898/"
```
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.3
```
Flagger detects that the deployment revision changed and starts the A/B test:
```text
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
New revision detected! Starting canary analysis for podinfo.test
Advance podinfo.test canary iteration 1/10
Advance podinfo.test canary iteration 2/10
Advance podinfo.test canary iteration 3/10
Advance podinfo.test canary iteration 4/10
Advance podinfo.test canary iteration 5/10
Advance podinfo.test canary iteration 6/10
Advance podinfo.test canary iteration 7/10
Advance podinfo.test canary iteration 8/10
Advance podinfo.test canary iteration 9/10
Advance podinfo.test canary iteration 10/10
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```


@@ -4,6 +4,7 @@ This guide shows you how to automate Blue/Green deployments with Flagger and Kub
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
with Kubernetes L4 networking.
When using a service mesh, blue/green can be used as specified [here](https://docs.flagger.app/how-it-works#blue-green-deployments).
![Flagger Blue/Green Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-bluegreen-steps.png)
@@ -80,7 +81,6 @@ metadata:
namespace: test
spec:
# service mesh provider can be: kubernetes, istio, appmesh, nginx, gloo
# use the kubernetes provider for Blue/Green style deployments
provider: kubernetes
# deployment reference
targetRef:
@@ -96,7 +96,6 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
portDiscovery: true
canaryAnalysis:
@@ -172,7 +171,7 @@ Trigger a deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -297,7 +296,7 @@ Trigger a deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.3
podinfod=stefanprodan/podinfo:3.1.3
```
Generate 404s:


@@ -54,15 +54,13 @@ kubectl create ns test
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/gloo/deployment.yaml
kubectl apply -f ${REPO}/artifacts/gloo/hpa.yaml
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a virtual service definition that references an upstream group that will be generated by Flagger
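A sketch of such a virtual service (the field names follow the Gloo gateway API of this era and may differ in newer releases; the upstream group it references must carry the same name as the canary target):
```yaml
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
spec:
  virtualHost:
    domains:
      - 'app.example.com'
    routes:
      - matcher:
          prefix: /
        routeAction:
          # upstream group generated and managed by Flagger
          upstreamGroup:
            name: podinfo
            namespace: test
```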
@@ -118,8 +116,10 @@ spec:
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
# ClusterIP port number
port: 9898
# container port number or name (optional)
targetPort: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
@@ -143,14 +143,21 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# load testing (optional)
# testing (optional)
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 10s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
```
Save the above resource as podinfo-canary.yaml and then apply it:
@@ -198,7 +205,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -252,19 +259,19 @@ Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.2
podinfod=stefanprodan/podinfo:3.1.2
```
Generate HTTP 500 errors:
```bash
watch curl http://app.example.com/status/500
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/status/500
```
Generate high latency:
```bash
watch curl http://app.example.com/delay/2
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/delay/2
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
@@ -335,13 +342,13 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.3
podinfod=stefanprodan/podinfo:3.1.3
```
Generate 404s:
```bash
watch curl http://app.example.com/status/400
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/status/400
```
Watch Flagger logs:


@@ -67,8 +67,10 @@ spec:
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
# ClusterIP port number
port: 9898
# container port number or name (optional)
targetPort: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 30s
@@ -100,12 +102,12 @@ spec:
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
- name: load-test
type: rollout
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 2m -q 10 -c 2 http://podinfo:9898/"
cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
@@ -150,7 +152,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -208,7 +210,7 @@ Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.2
podinfod=stefanprodan/podinfo:3.1.2
```
Exec into the load tester pod with:
@@ -297,7 +299,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.3
podinfod=stefanprodan/podinfo:3.1.3
```
Generate 404s:
@@ -444,7 +446,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.4
podinfod=stefanprodan/podinfo:3.1.4
```
Flagger detects that the deployment revision changed and starts the A/B testing:


@@ -56,8 +56,7 @@ kubectl create ns test
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
@@ -86,7 +85,7 @@ spec:
paths:
- backend:
serviceName: podinfo
servicePort: 9898
servicePort: 80
```
Save the above resource as podinfo-ingress.yaml and then apply it:
@@ -124,8 +123,10 @@ spec:
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
# ClusterIP port number
port: 80
# container port number or name
targetPort: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
@@ -144,13 +145,19 @@ spec:
# percentage (0-100)
threshold: 99
interval: 1m
# load testing (optional)
# testing (optional)
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```
@@ -190,7 +197,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -244,7 +251,7 @@ Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.2
podinfod=stefanprodan/podinfo:3.1.2
```
Generate HTTP 500 errors:
@@ -314,7 +321,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.3
podinfod=stefanprodan/podinfo:3.1.3
```
Generate high response latency:
@@ -373,12 +380,10 @@ Edit the canary analysis, remove the max/step weight and add the match condition
interval: 1m
webhooks:
- name: load-test
url: http://localhost:8888/
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
logCmdOutput: "true"
```
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
@@ -388,7 +393,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.4
podinfod=stefanprodan/podinfo:3.1.4
```
Flagger detects that the deployment revision changed and starts the A/B testing:
@@ -419,4 +424,3 @@ Events:
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```


@@ -2,28 +2,31 @@
This guide shows you how to use Istio and Flagger to automate canary deployments.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, Istio destination rules and virtual services).
These objects expose the application inside the mesh and drive the canary analysis and promotion.
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a canary custom resource (replace example.com with your own domain):
@@ -49,14 +52,26 @@ spec:
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
# service port number
port: 9898
# container port number or name (optional)
targetPort: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
# Istio traffic policy (optional)
trafficPolicy:
tls:
# use ISTIO_MUTUAL when mTLS is enabled
mode: DISABLE
# Istio retry policy (optional)
retries:
attempts: 3
perTryTimeout: 1s
retryOn: "gateway-error,connect-failure,refused-stream"
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
@@ -104,6 +119,8 @@ kubectl apply -f ./podinfo-canary.yaml
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-hpa.png)
After a couple of seconds Flagger will create the canary objects:
```bash
@@ -129,7 +146,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -164,6 +181,11 @@ Events:
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables
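For example (the ConfigMap name and key below are hypothetical), editing the data of a ConfigMap that the podinfo deployment consumes as env vars or a volume restarts the analysis just like an image change:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  # hypothetical ConfigMap referenced by the podinfo deployment
  name: podinfo-config
  namespace: test
data:
  # changing this value triggers a new canary analysis
  UI_LOGO: "https://example.com/logo.png"
```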
You can monitor all canaries with:
```bash
@@ -183,7 +205,7 @@ Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.2
podinfod=stefanprodan/podinfo:3.1.2
```
Exec into the load tester pod with:
@@ -229,3 +251,82 @@ Events:
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Traffic mirroring
![Flagger Canary Traffic Shadowing](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-traffic-mirroring.png)
For applications that perform read operations, Flagger can be configured to drive canary releases with traffic mirroring.
Istio traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user and the response from the canary is discarded.
Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are within the threshold values.
Note that mirroring should be used for requests that are **idempotent** or capable of being processed twice
(once by the primary and once by the canary).
You can enable mirroring by replacing `stepWeight/maxWeight` with `iterations` and
by setting `canaryAnalysis.mirror` to `true`:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
canaryAnalysis:
# schedule interval
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# total number of iterations
iterations: 10
# enable traffic shadowing
mirror: true
metrics:
- name: request-success-rate
threshold: 99
interval: 1m
- name: request-duration
threshold: 500
interval: 1m
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
With the above configuration, Flagger will run a canary release with the following steps:
* detect new revision (deployment spec, secrets or configmaps changes)
* scale from zero the canary deployment
* wait for the HPA to set the canary minimum replicas
* check canary pods health
* run the acceptance tests
* abort the canary release if tests fail
* start the load tests
* mirror traffic from primary to canary
* check request success rate and request duration every minute
* abort the canary release if the metrics check failure threshold is reached
* stop traffic mirroring after the number of iterations is reached
* route live traffic to the canary pods
* promote the canary (update the primary secrets, configmaps and deployment spec)
* wait for the primary deployment rollout to finish
* wait for the HPA to set the primary minimum replicas
* check primary pods health
* switch live traffic back to primary
* scale to zero the canary
* send notification with the canary analysis result
The above procedure can be extended with [custom metrics](https://docs.flagger.app/how-it-works#custom-metrics) checks,
[webhooks](https://docs.flagger.app/how-it-works#webhooks),
[manual promotion](https://docs.flagger.app/how-it-works#manual-gating) approval and
[Slack or MS Teams](https://docs.flagger.app/usage/alerting) notifications.
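For instance, a custom Prometheus check can be added next to the built-in ones; a sketch (the metric name, threshold and query are illustrative):
```yaml
canaryAnalysis:
  metrics:
    - name: "404s percentage"
      threshold: 3
      interval: 1m
      # illustrative PromQL executed against the Prometheus instance used by Flagger
      query: |
        100 - sum(
            rate(istio_requests_total{
              reporter="destination",
              destination_workload_namespace="test",
              destination_workload="podinfo",
              response_code!="404"
            }[1m])
        )
        /
        sum(
            rate(istio_requests_total{
              reporter="destination",
              destination_workload_namespace="test",
              destination_workload="podinfo"
            }[1m])
        ) * 100
```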

go.mod

@@ -6,53 +6,37 @@ require (
cloud.google.com/go v0.37.4 // indirect
github.com/Masterminds/semver v1.4.2
github.com/beorn7/perks v1.0.0 // indirect
github.com/bxcodec/faker v2.0.1+incompatible // indirect
github.com/envoyproxy/go-control-plane v0.8.0 // indirect
github.com/gogo/googleapis v1.2.0 // indirect
github.com/gogo/protobuf v1.2.1
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
github.com/golang/protobuf v1.3.1 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.3.0
github.com/hashicorp/consul v1.4.4 // indirect
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/serf v0.8.3 // indirect
github.com/hashicorp/vault v1.1.0 // indirect
github.com/googleapis/gnostic v0.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/imdario/mergo v0.3.7 // indirect
github.com/k0kubun/pp v3.0.1+incompatible // indirect
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 // indirect
github.com/lyft/protoc-gen-validate v0.0.14 // indirect
github.com/mattn/go-colorable v0.1.1 // indirect
github.com/mattn/go-isatty v0.0.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/mitchellh/hashstructure v1.0.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
github.com/prometheus/common v0.3.0 // indirect
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 // indirect
github.com/radovskyb/watcher v1.0.6 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/solo-io/gloo v0.13.17
github.com/solo-io/go-utils v0.7.11 // indirect
github.com/solo-io/solo-kit v0.6.3
github.com/solo-io/supergloo v0.3.11
go.opencensus.io v0.20.2 // indirect
go.uber.org/atomic v1.3.2 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.9.1
golang.org/x/crypto v0.0.0-20190418161225-b43e412143f9 // indirect
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c // indirect
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c // indirect
golang.org/x/text v0.3.2 // indirect
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/h2non/gock.v1 v1.0.14
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.0.0-20190620073856-dcce3486da33
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed // indirect
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
k8s.io/client-go v11.0.0+incompatible
k8s.io/code-generator v0.0.0-20190620073620-d55040311883
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 // indirect
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect
)
replace (

go.sum

@@ -2,126 +2,46 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.18.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/avast/retry-go v2.2.0+incompatible h1:m+w7mVLWa/oKqX2xYqiEKQQkeGH8DDEXB/XnjS54Wyw=
github.com/avast/retry-go v2.2.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA=
github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM=
github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.8.0 h1:uE6Fp4fOcAJdc1wTQXLJ+SYistkbG1dNoi6Zs1+Ybvk=
github.com/envoyproxy/go-control-plane v0.8.0/go.mod h1:GSSbY9P1neVhdY7G4wu+IK1rk/dqhiCC/4ExuWJZVuk=
github.com/envoyproxy/protoc-gen-validate v0.0.14 h1:YBW6/cKy9prEGRYLnaGa4IDhzxZhRCtKsax8srGKDnM=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc=
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fgrosse/zaptest v1.1.0 h1:sK9hP0/xBoNX5qfFo3KWFluDXfc809APomI1QXuYELA=
github.com/fgrosse/zaptest v1.1.0/go.mod h1:vMnRSul6kW7kIUXZgnZZcDwyTn8k49ODfAULL8nmL5w=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/validate v0.19.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -132,7 +52,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -140,19 +59,12 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -164,83 +76,25 @@ github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsC
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/goph/emperror v0.17.1/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM=
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/consul v1.4.4 h1:DR1+5EGgnPsd/LIsK3c9RDvajcsV5GOkGQBSNd3dpn8=
github.com/hashicorp/consul v1.4.4/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k=
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
github.com/hashicorp/vault v1.1.0 h1:v79NUgO5xCZnXVzUkIqFOXtP8YhpnHAi1fk3eo9cuOE=
github.com/hashicorp/vault v1.1.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40=
github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -248,45 +102,16 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 h1:+eM/rkJK2iCSi0fDzp218TzJSAglSGeI985YYgRS/mY=
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33/go.mod h1:n9QnL65Uv2gAG97S0t1q2aYmP33wPQ3oAh0+DJhQSSw=
github.com/lyft/protoc-gen-validate v0.0.14 h1:xbdDVIHd0Xq5Bfzu+8JR9s7mFmJPMvNLmfGhgcHJdFU=
github.com/lyft/protoc-gen-validate v0.0.14/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -303,14 +128,7 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
@@ -321,73 +139,35 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4=
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 h1:Raos9GP+3BlCBicScEQ+SjTLpYYac34fZMoeqj9McSM=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/radovskyb/watcher v1.0.6 h1:8WIQ9UxEYMZjem1OwU7dVH94DXXk9mAIE1i8eqHD+IY=
github.com/radovskyb/watcher v1.0.6/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/solo-io/gloo v0.13.17 h1:rbNmO7e5+0vEq5krkO9/Rcp16PqqvepyGD0j3xPdmhg=
github.com/solo-io/gloo v0.13.17/go.mod h1:dNnxchbq5F4ITJhX/0fy5brfbsj6vlW+AwgnTlqZN0A=
github.com/solo-io/go-utils v0.7.11 h1:3Kmk50e6nYqyf7MBY1473XkH5L7qO/Nigjx+t6jEOQo=
github.com/solo-io/go-utils v0.7.11/go.mod h1:7r+dFKdqJNOjx+odeLFqg8SOwVHyVVG1P0EPt6rNLN8=
github.com/solo-io/solo-kit v0.6.3 h1:s/SxcgG7YSjW7wu7iQER5MCHSzeXg1b/lCZRazQ0IMw=
github.com/solo-io/solo-kit v0.6.3/go.mod h1:oBaQ6tOwuO97u7w+s3TeI08YLHcbiWemInx0XkDfKFw=
github.com/solo-io/supergloo v0.3.11 h1:IwnrL2xojowzb7k+V2wCG3I6WrelzXsezqJiraaVxIM=
github.com/solo-io/supergloo v0.3.11/go.mod h1:hJuUwop5IMBL9Qc2/G+f+/PfIWPt/2nGr66fDcuhrn8=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423 h1:qTtUiiNM+iq4IXOwHofKW5+jzvkvnNVz0GFRxwukUlY=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423/go.mod h1:TYstY5LQfzxFVm9MiiMg7kZ39sc5cue/6CFoY5KgXn8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
@@ -401,7 +181,6 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -409,7 +188,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
@@ -435,7 +213,6 @@ gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
@@ -443,18 +220,11 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/AlecAivazis/survey.v1 v1.8.2/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
@@ -466,30 +236,18 @@ gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdOD
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI=
k8s.io/api v0.0.0-20190620073856-dcce3486da33 h1:aC/EvF9PT1h8NeMEOVwTel8xxbZwq0SZnxXNThEROnE=
k8s.io/api v0.0.0-20190620073856-dcce3486da33/go.mod h1:ldk709UQo/iedNLOW7J06V9QSSGY5heETKeWqnPoqF8=
k8s.io/apiextensions-apiserver v0.0.0-20190111034747-7d26de67f177+incompatible/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs=
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33 h1:Lkd+QNFOB3DqrDyWo796aodJgFJautn/M+t9IGearPc=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33/go.mod h1:9q5NW/mMno/nwbRZd/Ks2TECgi2PTZ9cwarf4q+ze6Q=
k8s.io/apiserver v0.0.0-20190111033246-d50e9ac5404f+incompatible/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
k8s.io/cli-runtime v0.0.0-20190111035321-c7263d800665+incompatible/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773 h1:XyjDnwRO9icfyrN7HRSa8o3NqdPOEQoVW8vWizuqyQQ=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773/go.mod h1:miKCC7C/WGwJqcDctyJtAnP3Gss0Y5KwURqJ7q5pfEw=
k8s.io/code-generator v0.0.0-20190620073620-d55040311883 h1:NWWNvN6IdpmQvZ43rVccCI8GPUrheK8XNdqeKycw0DI=
@@ -497,12 +255,9 @@ k8s.io/code-generator v0.0.0-20190620073620-d55040311883/go.mod h1:+a+9g9W0llgbg
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/helm v2.13.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 h1:hlzz2EvLPcefAcG/j0tOZpds4LWSElZzxpZuhxbblbc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666/go.mod h1:jqYp7BKXW0Jl+F1dWXBieUmcHKMPpGHGWA0uqfpOZZ4=
k8s.io/kubernetes v1.13.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
@@ -511,8 +266,6 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8=
sigs.k8s.io/structured-merge-diff v0.0.0-20181214233322-d43a45b8663b/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=

View File

@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh
${CODEGEN_PKG}/generate-groups.sh all \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
"flagger:v1alpha3 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 gloo:v1" \
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -41,6 +41,10 @@ spec:
type: string
JSONPath: .spec.canaryAnalysis.interval
priority: 1
- name: Mirror
type: boolean
JSONPath: .spec.canaryAnalysis.mirror
priority: 1
- name: StepWeight
type: string
JSONPath: .spec.canaryAnalysis.stepWeight
@@ -64,6 +68,9 @@ spec:
provider:
description: Traffic management provider
type: string
metricsServer:
description: Prometheus URL
type: string
progressDeadlineSeconds:
description: Deployment progress deadline
type: number
@@ -114,6 +121,11 @@ spec:
portName:
description: Container port name
type: string
targetPort:
description: Container target port name
anyOf:
- type: string
- type: number
portDiscovery:
description: Enable port discovery
type: boolean
@@ -183,6 +195,9 @@ spec:
stepWeight:
description: Canary incremental traffic percentage step
type: number
mirror:
description: Mirror traffic to canary before shifting
type: boolean
match:
description: A/B testing match conditions
anyOf:

View File

@@ -15,6 +15,7 @@ spec:
app: flagger
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
spec:
serviceAccountName: flagger
containers:

View File

@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 0.18.6
newTag: 0.20.2

View File

@@ -2,12 +2,11 @@ global:
scrape_interval: 5s
scrape_configs:
# Scrape config for AppMesh Envoy sidecar
# scrape config for AppMesh Envoy sidecar
- job_name: 'appmesh-envoy'
metrics_path: /stats/prometheus
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_name]
action: keep
@@ -25,8 +24,7 @@ scrape_configs:
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
# Exclude high cardinality metrics
# exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [ cluster_name ]
regex: '(outbound|inbound|prometheus_stats).*'
@@ -56,7 +54,7 @@ scrape_configs:
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
action: drop
# Scrape config for API servers
# scrape config for API servers
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
@@ -66,35 +64,19 @@ scrape_configs:
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https
# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
@@ -107,6 +89,14 @@ scrape_configs:
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [__name__]
regex: (container|machine)_(cpu|memory|network|fs)_(.+)
action: keep
- source_labels: [__name__]
regex: container_memory_failures_total
action: drop
# scrape config for pods
- job_name: kubernetes-pods

View File

@@ -19,6 +19,7 @@ spec:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9797"
labels:
app: podinfo
spec:

View File

@@ -14,10 +14,11 @@ spec:
app: flagger-loadtester
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.8.0
image: weaveworks/flagger-loadtester:0.11.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -2,6 +2,4 @@ namespace: test
resources:
- service.yaml
- deployment.yaml
images:
- name: weaveworks/flagger-loadtester
newTag: 0.6.1

View File

@@ -28,6 +28,7 @@ type Mesh struct {
type MeshServiceDiscoveryType string
const (
// Dns type is used when mesh is backed by a DNS namespace
Dns MeshServiceDiscoveryType = "Dns"
)
@@ -104,8 +105,12 @@ type VirtualServiceSpec struct {
// VirtualRouter is the spec for a VirtualRouter resource
type VirtualRouter struct {
Name string `json:"name"`
Listeners []Listener `json:"listeners,omitempty"`
Name string `json:"name"`
Listeners []VirtualRouterListener `json:"listeners,omitempty"`
}
type VirtualRouterListener struct {
PortMapping PortMapping `json:"portMapping"`
}
type Route struct {
@@ -114,21 +119,78 @@ type Route struct {
Http *HttpRoute `json:"http,omitempty"`
// +optional
Tcp *TcpRoute `json:"tcp,omitempty"`
// +optional
Priority *int64 `json:"priority,omitempty"`
}
type HttpRoute struct {
Match HttpRouteMatch `json:"match"`
Action HttpRouteAction `json:"action"`
// +optional
RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"`
}
type HttpRouteMatch struct {
Prefix string `json:"prefix"`
// +optional
Method *string `json:"method,omitempty"`
// +optional
Headers []HttpRouteHeader `json:"headers,omitempty"`
// +optional
Scheme *string `json:"scheme,omitempty"`
}
type HttpRouteHeader struct {
Name string `json:"name"`
// +optional
Invert *bool `json:"invert,omitempty"`
// +optional
Match *HeaderMatchMethod `json:"match,omitempty"`
}
type HeaderMatchMethod struct {
// +optional
Exact *string `json:"exact,omitempty"`
// +optional
Prefix *string `json:"prefix,omitempty"`
// +optional
Range *MatchRange `json:"range,omitempty"`
// +optional
Regex *string `json:"regex,omitempty"`
// +optional
Suffix *string `json:"suffix,omitempty"`
}
type MatchRange struct {
// +optional
Start *int64 `json:"start,omitempty"`
// +optional
End *int64 `json:"end,omitempty"`
}
type HttpRouteAction struct {
WeightedTargets []WeightedTarget `json:"weightedTargets"`
}
type HttpRetryPolicy struct {
// +optional
PerRetryTimeoutMillis *int64 `json:"perRetryTimeoutMillis,omitempty"`
// +optional
MaxRetries *int64 `json:"maxRetries,omitempty"`
// +optional
HttpRetryPolicyEvents []HttpRetryPolicyEvent `json:"httpRetryEvents,omitempty"`
// +optional
TcpRetryPolicyEvents []TcpRetryPolicyEvent `json:"tcpRetryEvents,omitempty"`
}
type HttpRetryPolicyEvent string
type TcpRetryPolicyEvent string
const (
TcpRetryPolicyEventConnectionError TcpRetryPolicyEvent = "connection-error"
)
type TcpRoute struct {
Action TcpRouteAction `json:"action"`
}
@@ -222,6 +284,8 @@ type VirtualNodeSpec struct {
type Listener struct {
PortMapping PortMapping `json:"portMapping"`
// +optional
HealthCheck *HealthCheckPolicy `json:"healthCheck,omitempty"`
}
type PortMapping struct {
@@ -229,6 +293,28 @@ type PortMapping struct {
Protocol string `json:"protocol"`
}
type HealthCheckPolicy struct {
// +optional
HealthyThreshold *int64 `json:"healthyThreshold,omitempty"`
// +optional
IntervalMillis *int64 `json:"intervalMillis,omitempty"`
// +optional
Path *string `json:"path,omitempty"`
// +optional
Port *int64 `json:"port,omitempty"`
// +optional
Protocol *string `json:"protocol,omitempty"`
// +optional
TimeoutMillis *int64 `json:"timeoutMillis,omitempty"`
// +optional
UnhealthyThreshold *int64 `json:"unhealthyThreshold,omitempty"`
}
const (
PortProtocolHttp = "http"
PortProtocolTcp = "tcp"
)
type ServiceDiscovery struct {
// +optional
CloudMap *CloudMapServiceDiscovery `json:"cloudMap,omitempty"`
@@ -237,7 +323,10 @@ type ServiceDiscovery struct {
}
type CloudMapServiceDiscovery struct {
CloudMapServiceName string `json:"cloudMapServiceName"`
ServiceName string `json:"serviceName"`
NamespaceName string `json:"namespaceName"`
// +optional
Attributes map[string]string `json:"attributes,omitempty"`
}
type DnsServiceDiscovery struct {
@@ -272,13 +361,21 @@ type VirtualNodeStatus struct {
MeshArn *string `json:"meshArn,omitempty"`
// VirtualNodeArn is the AppMesh VirtualNode object's Amazon Resource Name
// +optional
VirtualNodeArn *string `json:"virtualNodeArn,omitempty"`
// CloudMapServiceArn is a CloudMap Service object's Amazon Resource Name
VirtualNodeArn *string `json:"virtualNodeArn,omitempty"`
Conditions []VirtualNodeCondition `json:"conditions"`
// CloudMapService is AWS CloudMap Service object's info
// +optional
CloudMapServiceArn *string `json:"cloudMapServiceArn,omitempty"`
CloudMapService *CloudMapServiceStatus `json:"cloudmapService,omitempty"`
}
// CloudMapServiceStatus is AWS CloudMap Service object's info
type CloudMapServiceStatus struct {
// ServiceID is AWS CloudMap Service object's Id
// +optional
QueryParameters map[string]string `json:"queryParameters,omitempty"`
Conditions []VirtualNodeCondition `json:"conditions"`
ServiceID *string `json:"serviceId,omitempty"`
// NamespaceID is AWS CloudMap Service object's namespace Id
// +optional
NamespaceID *string `json:"namespaceId,omitempty"`
}
type VirtualNodeConditionType string
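The hunk above adds header matching, retry policies and listener health checks to the App Mesh types. A minimal sketch of how the new HttpRouteMatch and HttpRetryPolicy fit together, written as if it lived inside the same appmesh v1beta1 package; the header name, retry event and timeout values are illustrative, not taken from this change.
// canaryRoute is a hypothetical helper, not part of this change set.
func canaryRoute() HttpRoute {
	exact := "insider"            // assumed header value
	perRetryTimeout := int64(250) // milliseconds
	maxRetries := int64(2)
	return HttpRoute{
		Match: HttpRouteMatch{
			Prefix: "/",
			Headers: []HttpRouteHeader{
				{Name: "x-canary", Match: &HeaderMatchMethod{Exact: &exact}},
			},
		},
		Action: HttpRouteAction{}, // weighted targets omitted for brevity
		RetryPolicy: &HttpRetryPolicy{
			PerRetryTimeoutMillis: &perRetryTimeout,
			MaxRetries:            &maxRetries,
			HttpRetryPolicyEvents: []HttpRetryPolicyEvent{"server-error"}, // event name assumed
		},
	}
}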

View File

@@ -65,6 +65,13 @@ func (in *Backend) DeepCopy() *Backend {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudMapServiceDiscovery) DeepCopyInto(out *CloudMapServiceDiscovery) {
*out = *in
if in.Attributes != nil {
in, out := &in.Attributes, &out.Attributes
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
@@ -78,6 +85,32 @@ func (in *CloudMapServiceDiscovery) DeepCopy() *CloudMapServiceDiscovery {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudMapServiceStatus) DeepCopyInto(out *CloudMapServiceStatus) {
*out = *in
if in.ServiceID != nil {
in, out := &in.ServiceID, &out.ServiceID
*out = new(string)
**out = **in
}
if in.NamespaceID != nil {
in, out := &in.NamespaceID, &out.NamespaceID
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMapServiceStatus.
func (in *CloudMapServiceStatus) DeepCopy() *CloudMapServiceStatus {
if in == nil {
return nil
}
out := new(CloudMapServiceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DnsServiceDiscovery) DeepCopyInto(out *DnsServiceDiscovery) {
*out = *in
@@ -110,11 +143,144 @@ func (in *FileAccessLog) DeepCopy() *FileAccessLog {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HeaderMatchMethod) DeepCopyInto(out *HeaderMatchMethod) {
*out = *in
if in.Exact != nil {
in, out := &in.Exact, &out.Exact
*out = new(string)
**out = **in
}
if in.Prefix != nil {
in, out := &in.Prefix, &out.Prefix
*out = new(string)
**out = **in
}
if in.Range != nil {
in, out := &in.Range, &out.Range
*out = new(MatchRange)
(*in).DeepCopyInto(*out)
}
if in.Regex != nil {
in, out := &in.Regex, &out.Regex
*out = new(string)
**out = **in
}
if in.Suffix != nil {
in, out := &in.Suffix, &out.Suffix
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatchMethod.
func (in *HeaderMatchMethod) DeepCopy() *HeaderMatchMethod {
if in == nil {
return nil
}
out := new(HeaderMatchMethod)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthCheckPolicy) DeepCopyInto(out *HealthCheckPolicy) {
*out = *in
if in.HealthyThreshold != nil {
in, out := &in.HealthyThreshold, &out.HealthyThreshold
*out = new(int64)
**out = **in
}
if in.IntervalMillis != nil {
in, out := &in.IntervalMillis, &out.IntervalMillis
*out = new(int64)
**out = **in
}
if in.Path != nil {
in, out := &in.Path, &out.Path
*out = new(string)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(int64)
**out = **in
}
if in.Protocol != nil {
in, out := &in.Protocol, &out.Protocol
*out = new(string)
**out = **in
}
if in.TimeoutMillis != nil {
in, out := &in.TimeoutMillis, &out.TimeoutMillis
*out = new(int64)
**out = **in
}
if in.UnhealthyThreshold != nil {
in, out := &in.UnhealthyThreshold, &out.UnhealthyThreshold
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckPolicy.
func (in *HealthCheckPolicy) DeepCopy() *HealthCheckPolicy {
if in == nil {
return nil
}
out := new(HealthCheckPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRetryPolicy) DeepCopyInto(out *HttpRetryPolicy) {
*out = *in
if in.PerRetryTimeoutMillis != nil {
in, out := &in.PerRetryTimeoutMillis, &out.PerRetryTimeoutMillis
*out = new(int64)
**out = **in
}
if in.MaxRetries != nil {
in, out := &in.MaxRetries, &out.MaxRetries
*out = new(int64)
**out = **in
}
if in.HttpRetryPolicyEvents != nil {
in, out := &in.HttpRetryPolicyEvents, &out.HttpRetryPolicyEvents
*out = make([]HttpRetryPolicyEvent, len(*in))
copy(*out, *in)
}
if in.TcpRetryPolicyEvents != nil {
in, out := &in.TcpRetryPolicyEvents, &out.TcpRetryPolicyEvents
*out = make([]TcpRetryPolicyEvent, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRetryPolicy.
func (in *HttpRetryPolicy) DeepCopy() *HttpRetryPolicy {
if in == nil {
return nil
}
out := new(HttpRetryPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRoute) DeepCopyInto(out *HttpRoute) {
*out = *in
out.Match = in.Match
in.Match.DeepCopyInto(&out.Match)
in.Action.DeepCopyInto(&out.Action)
if in.RetryPolicy != nil {
in, out := &in.RetryPolicy, &out.RetryPolicy
*out = new(HttpRetryPolicy)
(*in).DeepCopyInto(*out)
}
return
}
@@ -149,9 +315,52 @@ func (in *HttpRouteAction) DeepCopy() *HttpRouteAction {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteHeader) DeepCopyInto(out *HttpRouteHeader) {
*out = *in
if in.Invert != nil {
in, out := &in.Invert, &out.Invert
*out = new(bool)
**out = **in
}
if in.Match != nil {
in, out := &in.Match, &out.Match
*out = new(HeaderMatchMethod)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRouteHeader.
func (in *HttpRouteHeader) DeepCopy() *HttpRouteHeader {
if in == nil {
return nil
}
out := new(HttpRouteHeader)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteMatch) DeepCopyInto(out *HttpRouteMatch) {
*out = *in
if in.Method != nil {
in, out := &in.Method, &out.Method
*out = new(string)
**out = **in
}
if in.Headers != nil {
in, out := &in.Headers, &out.Headers
*out = make([]HttpRouteHeader, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Scheme != nil {
in, out := &in.Scheme, &out.Scheme
*out = new(string)
**out = **in
}
return
}
@@ -169,6 +378,11 @@ func (in *HttpRouteMatch) DeepCopy() *HttpRouteMatch {
func (in *Listener) DeepCopyInto(out *Listener) {
*out = *in
out.PortMapping = in.PortMapping
if in.HealthCheck != nil {
in, out := &in.HealthCheck, &out.HealthCheck
*out = new(HealthCheckPolicy)
(*in).DeepCopyInto(*out)
}
return
}
@@ -203,6 +417,32 @@ func (in *Logging) DeepCopy() *Logging {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchRange) DeepCopyInto(out *MatchRange) {
*out = *in
if in.Start != nil {
in, out := &in.Start, &out.Start
*out = new(int64)
**out = **in
}
if in.End != nil {
in, out := &in.End, &out.End
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchRange.
func (in *MatchRange) DeepCopy() *MatchRange {
if in == nil {
return nil
}
out := new(MatchRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mesh) DeepCopyInto(out *Mesh) {
*out = *in
@@ -372,6 +612,11 @@ func (in *Route) DeepCopyInto(out *Route) {
*out = new(TcpRoute)
(*in).DeepCopyInto(*out)
}
if in.Priority != nil {
in, out := &in.Priority, &out.Priority
*out = new(int64)
**out = **in
}
return
}
@@ -391,7 +636,7 @@ func (in *ServiceDiscovery) DeepCopyInto(out *ServiceDiscovery) {
if in.CloudMap != nil {
in, out := &in.CloudMap, &out.CloudMap
*out = new(CloudMapServiceDiscovery)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.Dns != nil {
in, out := &in.Dns, &out.Dns
@@ -546,7 +791,9 @@ func (in *VirtualNodeSpec) DeepCopyInto(out *VirtualNodeSpec) {
if in.Listeners != nil {
in, out := &in.Listeners, &out.Listeners
*out = make([]Listener, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ServiceDiscovery != nil {
in, out := &in.ServiceDiscovery, &out.ServiceDiscovery
@@ -589,18 +836,6 @@ func (in *VirtualNodeStatus) DeepCopyInto(out *VirtualNodeStatus) {
*out = new(string)
**out = **in
}
if in.CloudMapServiceArn != nil {
in, out := &in.CloudMapServiceArn, &out.CloudMapServiceArn
*out = new(string)
**out = **in
}
if in.QueryParameters != nil {
in, out := &in.QueryParameters, &out.QueryParameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]VirtualNodeCondition, len(*in))
@@ -608,6 +843,11 @@ func (in *VirtualNodeStatus) DeepCopyInto(out *VirtualNodeStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CloudMapService != nil {
in, out := &in.CloudMapService, &out.CloudMapService
*out = new(CloudMapServiceStatus)
(*in).DeepCopyInto(*out)
}
return
}
@@ -626,7 +866,7 @@ func (in *VirtualRouter) DeepCopyInto(out *VirtualRouter) {
*out = *in
if in.Listeners != nil {
in, out := &in.Listeners, &out.Listeners
*out = make([]Listener, len(*in))
*out = make([]VirtualRouterListener, len(*in))
copy(*out, *in)
}
return
@@ -642,6 +882,23 @@ func (in *VirtualRouter) DeepCopy() *VirtualRouter {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualRouterListener) DeepCopyInto(out *VirtualRouterListener) {
*out = *in
out.PortMapping = in.PortMapping
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouterListener.
func (in *VirtualRouterListener) DeepCopy() *VirtualRouterListener {
if in == nil {
return nil
}
out := new(VirtualRouterListener)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualService) DeepCopyInto(out *VirtualService) {
*out = *in

pkg/apis/flagger/v1alpha3/types.go Executable file → Normal file
View File

@@ -19,9 +19,11 @@ package v1alpha3
import (
"time"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
)
const (
@@ -49,6 +51,10 @@ type CanarySpec struct {
// +optional
Provider string `json:"provider,omitempty"`
// if specified, overrides the -metrics-server flag for this particular canary
// +optional
MetricsServer string `json:"metricsServer,omitempty"`
// reference to target resource
TargetRef hpav1.CrossVersionObjectReference `json:"targetRef"`
@@ -88,10 +94,11 @@ type CanaryList struct {
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
type CanaryService struct {
Port int32 `json:"port"`
PortName string `json:"portName,omitempty"`
PortDiscovery bool `json:"portDiscovery"`
Timeout string `json:"timeout,omitempty"`
Port int32 `json:"port"`
PortName string `json:"portName,omitempty"`
TargetPort intstr.IntOrString `json:"targetPort,omitempty"`
PortDiscovery bool `json:"portDiscovery"`
Timeout string `json:"timeout,omitempty"`
// Istio
Gateways []string `json:"gateways,omitempty"`
Hosts []string `json:"hosts,omitempty"`
@@ -111,6 +118,7 @@ type CanaryAnalysis struct {
Interval string `json:"interval"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
Mirror bool `json:"mirror,omitempty"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Webhooks []CanaryWebhook `json:"webhooks,omitempty"`
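The new MetricsServer field (a per-canary Prometheus URL override), the intstr-typed TargetPort and the Mirror flag are the user-facing additions in this file. A sketch of populating them from inside the same v1alpha3 package (intstr is already imported above; the helpers and values are illustrative, not part of this change):
// exampleService is a hypothetical helper showing the new targetPort field.
func exampleService() CanaryService {
	return CanaryService{
		Port:          9898,
		TargetPort:    intstr.FromString("http"), // container port name or number
		PortDiscovery: true,
	}
}
// exampleAnalysis shows the new mirror flag alongside the existing weight settings.
func exampleAnalysis() CanaryAnalysis {
	return CanaryAnalysis{
		Interval:   "1m",
		Threshold:  5,
		MaxWeight:  50,
		StepWeight: 10,
		Mirror:     true, // mirror traffic to the canary before shifting weight
	}
}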

View File

@@ -159,6 +159,7 @@ func (in *CanaryMetric) DeepCopy() *CanaryMetric {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryService) DeepCopyInto(out *CanaryService) {
*out = *in
out.TargetPort = in.TargetPort
if in.Gateways != nil {
in, out := &in.Gateways, &out.Gateways
*out = make([]string, len(*in))

View File

@@ -0,0 +1,5 @@
package gloo
const (
GroupName = "gloo.solo.io"
)

pkg/apis/gloo/v1/doc.go Normal file
View File

@@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package
// Package v1 is the v1 version of the API.
// +groupName=gloo.solo.io
package v1

pkg/apis/gloo/v1/register.go Executable file
View File

@@ -0,0 +1,36 @@
package v1
import (
"github.com/weaveworks/flagger/pkg/apis/gloo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: gloo.GroupName, Version: "v1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&UpstreamGroup{},
&UpstreamGroupList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
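A short usage sketch, assuming a hypothetical caller that imports this package as gloov1 and k8s.io/apimachinery/pkg/runtime: the new gloo.solo.io/v1 kinds are wired into a scheme through the AddToScheme helper defined here.
// newGlooScheme is an illustrative helper, not part of this change set.
func newGlooScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	if err := gloov1.AddToScheme(scheme); err != nil {
		panic(err) // without registration the UpstreamGroup kinds cannot be decoded
	}
	return scheme
}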

pkg/apis/gloo/v1/types.go Normal file
View File

@@ -0,0 +1,49 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// UpstreamGroup is a specification for a Gloo UpstreamGroup resource
type UpstreamGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec UpstreamGroupSpec `json:"spec"`
}
type UpstreamGroupSpec struct {
Destinations []WeightedDestination `json:"destinations,omitempty"`
}
// WeightedDestination attaches a weight to a single destination.
type WeightedDestination struct {
Destination Destination `json:"destination,omitempty"`
// Weight must be greater than zero
// Routing to each destination will be balanced by the ratio of the destination's weight to the total weight on a route
Weight uint32 `json:"weight,omitempty"`
}
// Destinations define routable destinations for proxied requests
type Destination struct {
Upstream ResourceRef `json:"upstream"`
}
// ResourceRef references resources across namespaces
type ResourceRef struct {
Name string `json:"name,omitempty"`
Namespace string `json:"namespace,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// UpstreamGroupList is a list of UpstreamGroup resources
type UpstreamGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []UpstreamGroup `json:"items"`
}
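To make the shape of the new resource concrete, a hypothetical UpstreamGroup that splits traffic between a primary and a canary upstream; the names, namespace and weights are invented, only the field layout comes from the types above (metav1 is imported by this file).
// exampleUpstreamGroup is an illustrative helper, not part of this change set.
func exampleUpstreamGroup() UpstreamGroup {
	return UpstreamGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "test"},
		Spec: UpstreamGroupSpec{
			Destinations: []WeightedDestination{
				// 90/10 split between primary and canary upstreams (names are made up)
				{Destination: Destination{Upstream: ResourceRef{Name: "test-podinfo-primary-9898", Namespace: "gloo-system"}}, Weight: 90},
				{Destination: Destination{Upstream: ResourceRef{Name: "test-podinfo-canary-9898", Namespace: "gloo-system"}}, Weight: 10},
			},
		},
	}
}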

View File

@@ -0,0 +1,156 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Destination) DeepCopyInto(out *Destination) {
*out = *in
out.Upstream = in.Upstream
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination.
func (in *Destination) DeepCopy() *Destination {
if in == nil {
return nil
}
out := new(Destination)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRef) DeepCopyInto(out *ResourceRef) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRef.
func (in *ResourceRef) DeepCopy() *ResourceRef {
if in == nil {
return nil
}
out := new(ResourceRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpstreamGroup) DeepCopyInto(out *UpstreamGroup) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamGroup.
func (in *UpstreamGroup) DeepCopy() *UpstreamGroup {
if in == nil {
return nil
}
out := new(UpstreamGroup)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UpstreamGroup) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpstreamGroupList) DeepCopyInto(out *UpstreamGroupList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]UpstreamGroup, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamGroupList.
func (in *UpstreamGroupList) DeepCopy() *UpstreamGroupList {
if in == nil {
return nil
}
out := new(UpstreamGroupList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UpstreamGroupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpstreamGroupSpec) DeepCopyInto(out *UpstreamGroupSpec) {
*out = *in
if in.Destinations != nil {
in, out := &in.Destinations, &out.Destinations
*out = make([]WeightedDestination, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamGroupSpec.
func (in *UpstreamGroupSpec) DeepCopy() *UpstreamGroupSpec {
if in == nil {
return nil
}
out := new(UpstreamGroupSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeightedDestination) DeepCopyInto(out *WeightedDestination) {
*out = *in
out.Destination = in.Destination
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedDestination.
func (in *WeightedDestination) DeepCopy() *WeightedDestination {
if in == nil {
return nil
}
out := new(WeightedDestination)
in.DeepCopyInto(out)
return out
}

View File

@@ -7,8 +7,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/mitchellh/hashstructure"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v2beta1"
@@ -16,7 +14,11 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
// Deployer is managing the operations for Kubernetes deployment kind
@@ -30,7 +32,7 @@ type Deployer struct {
// Initialize creates the primary deployment, hpa,
// scales to zero the canary deployment and returns the pod selector label and container ports
func (c *Deployer) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (label string, ports *map[string]int32, err error) {
func (c *Deployer) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (label string, ports map[string]int32, err error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
label, ports, err = c.createPrimaryDeployment(cd)
if err != nil {
@@ -183,7 +185,31 @@ func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
return nil
}
func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, *map[string]int32, error) {
func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
targetName := cd.Spec.TargetRef.Name
dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
replicas := int32p(1)
if dep.Spec.Replicas != nil && *dep.Spec.Replicas > 0 {
replicas = dep.Spec.Replicas
}
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = replicas
_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
@@ -201,13 +227,13 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, *map[s
targetName, cd.Namespace, targetName)
}
var ports *map[string]int32
var ports map[string]int32
if cd.Spec.Service.PortDiscovery {
p, err := c.getPorts(canaryDep, cd.Spec.Service.Port)
p, err := c.getPorts(cd, canaryDep)
if err != nil {
return "", nil, fmt.Errorf("port discovery failed with error: %v", err)
}
ports = &p
ports = p
}
primaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
@@ -235,6 +261,9 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, *map[s
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: map[string]string{
label: primaryName,
},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
@@ -392,7 +421,7 @@ var sidecars = map[string]bool{
}
// getPorts returns a list of all container ports
func (c *Deployer) getPorts(deployment *appsv1.Deployment, canaryPort int32) (map[string]int32, error) {
func (c *Deployer) getPorts(cd *flaggerv1.Canary, deployment *appsv1.Deployment) (map[string]int32, error) {
ports := make(map[string]int32)
for _, container := range deployment.Spec.Template.Spec.Containers {
@@ -401,9 +430,22 @@ func (c *Deployer) getPorts(deployment *appsv1.Deployment, canaryPort int32) (ma
continue
}
for i, p := range container.Ports {
// exclude canary.service.port
if p.ContainerPort == canaryPort {
continue
// exclude canary.service.port or canary.service.targetPort
if cd.Spec.Service.TargetPort.String() == "0" {
if p.ContainerPort == cd.Spec.Service.Port {
continue
}
} else {
if cd.Spec.Service.TargetPort.Type == intstr.Int {
if p.ContainerPort == cd.Spec.Service.TargetPort.IntVal {
continue
}
}
if cd.Spec.Service.TargetPort.Type == intstr.String {
if p.Name == cd.Spec.Service.TargetPort.StrVal {
continue
}
}
}
name := fmt.Sprintf("tcp-%s-%v", container.Name, i)
if p.Name != "" {
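
Note: the getPorts change above stops excluding a single hard-coded canary port and instead skips either canary.service.port (when no targetPort is set) or canary.service.targetPort, matched by container port number or by port name. A minimal standalone sketch of that selection logic — only the branch structure over intstr.IntOrString, not the Flagger function itself:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// excludePort mirrors the exclusion rule from the diff above: with no
// targetPort set (the zero value prints as "0") the canary service port is
// skipped during port discovery; otherwise the targetPort is matched either
// by container port number or by container port name. Illustrative only.
func excludePort(servicePort int32, targetPort intstr.IntOrString, containerPort int32, portName string) bool {
	if targetPort.String() == "0" {
		return containerPort == servicePort
	}
	if targetPort.Type == intstr.Int {
		return containerPort == targetPort.IntVal
	}
	return portName == targetPort.StrVal
}

func main() {
	fmt.Println(excludePort(9898, intstr.IntOrString{}, 9898, "http"))      // true: no targetPort, match on service port
	fmt.Println(excludePort(9898, intstr.FromString("http"), 9797, "http")) // true: named targetPort, match on port name
	fmt.Println(excludePort(9898, intstr.FromInt(9797), 9797, "metrics"))   // true: numeric targetPort, match on port number
}

Matching named target ports lets discovery skip the right container port even when the Service references it by name rather than by number.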

View File

@@ -3,8 +3,9 @@ package canary
import (
"testing"
"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
func TestCanaryDeployer_Sync(t *testing.T) {
@@ -229,7 +230,7 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
err = mocks.deployer.SetStatusPhase(mocks.canary, v1alpha3.CanaryPhaseProgressing)
err = mocks.deployer.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing)
if err != nil {
t.Fatal(err.Error())
}
@@ -239,8 +240,8 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
if res.Status.Phase != v1alpha3.CanaryPhaseProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryPhaseProgressing)
if res.Status.Phase != flaggerv1.CanaryPhaseProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, flaggerv1.CanaryPhaseProgressing)
}
}
@@ -251,8 +252,8 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
t.Fatal(err.Error())
}
status := v1alpha3.CanaryStatus{
Phase: v1alpha3.CanaryPhaseProgressing,
status := flaggerv1.CanaryStatus{
Phase: flaggerv1.CanaryPhaseProgressing,
FailedChecks: 2,
}
err = mocks.deployer.SyncStatus(mocks.canary, status)

View File

@@ -1,9 +1,6 @@
package canary
import (
"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
"github.com/weaveworks/flagger/pkg/logger"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
@@ -13,10 +10,14 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
)
type Mocks struct {
canary *v1alpha3.Canary
canary *flaggerv1.Canary
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
deployer Deployer
@@ -172,14 +173,14 @@ func NewTestSecretVol() *corev1.Secret {
}
}
func newTestCanary() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
func newTestCanary() *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha3.CanarySpec{
Spec: flaggerv1.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
@@ -189,13 +190,13 @@ func newTestCanary() *v1alpha3.Canary {
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha3.CanaryService{
}, Service: flaggerv1.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
}, CanaryAnalysis: flaggerv1.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha3.CanaryMetric{
Metrics: []flaggerv1.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,

View File

@@ -4,10 +4,11 @@ import (
"fmt"
"time"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
// IsPrimaryReady checks the primary deployment status and returns an error if

View File

@@ -2,14 +2,15 @@ package canary
import (
"fmt"
"k8s.io/client-go/util/retry"
"github.com/mitchellh/hashstructure"
ex "github.com/pkg/errors"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
// SyncStatus encodes the canary pod spec and updates the canary status

View File

@@ -4,14 +4,16 @@ import (
"crypto/sha256"
"encoding/json"
"fmt"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
)
// ConfigTracker is managing the operations for Kubernetes ConfigMaps and Secrets

View File

@@ -21,6 +21,7 @@ package versioned
import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
discovery "k8s.io/client-go/discovery"
@@ -32,6 +33,7 @@ type Interface interface {
Discovery() discovery.DiscoveryInterface
AppmeshV1beta1() appmeshv1beta1.AppmeshV1beta1Interface
FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
GlooV1() gloov1.GlooV1Interface
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface
}
@@ -42,6 +44,7 @@ type Clientset struct {
*discovery.DiscoveryClient
appmeshV1beta1 *appmeshv1beta1.AppmeshV1beta1Client
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
glooV1 *gloov1.GlooV1Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
splitV1alpha1 *splitv1alpha1.SplitV1alpha1Client
}
@@ -56,6 +59,11 @@ func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
return c.flaggerV1alpha3
}
// GlooV1 retrieves the GlooV1Client
func (c *Clientset) GlooV1() gloov1.GlooV1Interface {
return c.glooV1
}
// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
return c.networkingV1alpha3
@@ -90,6 +98,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
if err != nil {
return nil, err
}
cs.glooV1, err = gloov1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
@@ -112,6 +124,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.appmeshV1beta1 = appmeshv1beta1.NewForConfigOrDie(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.glooV1 = gloov1.NewForConfigOrDie(c)
cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
cs.splitV1alpha1 = splitv1alpha1.NewForConfigOrDie(c)
@@ -124,6 +137,7 @@ func New(c rest.Interface) *Clientset {
var cs Clientset
cs.appmeshV1beta1 = appmeshv1beta1.New(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.glooV1 = gloov1.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.splitV1alpha1 = splitv1alpha1.New(c)
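
Note: with the wiring above, the versioned clientset exposes GlooV1() next to the App Mesh, Flagger, Istio and SMI groups. A minimal usage sketch — the kubeconfig path and the "test" namespace are placeholders, not taken from this change:

package main

import (
	"fmt"
	"log"

	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a local kubeconfig (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatal(err)
	}

	// The generated clientset now carries the Gloo group as well.
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// List UpstreamGroups in a namespace through the new typed client.
	groups, err := cs.GlooV1().UpstreamGroups("test").List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, ug := range groups.Items {
		fmt.Println(ug.Name)
	}
}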

View File

@@ -24,6 +24,8 @@ import (
fakeappmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1/fake"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
fakeflaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
gloov1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1"
fakegloov1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1/fake"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
fakenetworkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
@@ -92,6 +94,11 @@ func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}
// GlooV1 retrieves the GlooV1Client
func (c *Clientset) GlooV1() gloov1.GlooV1Interface {
return &fakegloov1.FakeGlooV1{Fake: &c.Fake}
}
// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}

View File

@@ -21,6 +21,7 @@ package fake
import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +37,7 @@ var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
appmeshv1beta1.AddToScheme,
flaggerv1alpha3.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
splitv1alpha1.AddToScheme,
}

View File

@@ -21,6 +21,7 @@ package scheme
import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +37,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
appmeshv1beta1.AddToScheme,
flaggerv1alpha3.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
splitv1alpha1.AddToScheme,
}

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,40 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/gloo/v1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeGlooV1 struct {
*testing.Fake
}
func (c *FakeGlooV1) UpstreamGroups(namespace string) v1.UpstreamGroupInterface {
return &FakeUpstreamGroups{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeGlooV1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -0,0 +1,128 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeUpstreamGroups implements UpstreamGroupInterface
type FakeUpstreamGroups struct {
Fake *FakeGlooV1
ns string
}
var upstreamgroupsResource = schema.GroupVersionResource{Group: "gloo.solo.io", Version: "v1", Resource: "upstreamgroups"}
var upstreamgroupsKind = schema.GroupVersionKind{Group: "gloo.solo.io", Version: "v1", Kind: "UpstreamGroup"}
// Get takes name of the upstreamGroup, and returns the corresponding upstreamGroup object, and an error if there is any.
func (c *FakeUpstreamGroups) Get(name string, options v1.GetOptions) (result *gloov1.UpstreamGroup, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(upstreamgroupsResource, c.ns, name), &gloov1.UpstreamGroup{})
if obj == nil {
return nil, err
}
return obj.(*gloov1.UpstreamGroup), err
}
// List takes label and field selectors, and returns the list of UpstreamGroups that match those selectors.
func (c *FakeUpstreamGroups) List(opts v1.ListOptions) (result *gloov1.UpstreamGroupList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(upstreamgroupsResource, upstreamgroupsKind, c.ns, opts), &gloov1.UpstreamGroupList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &gloov1.UpstreamGroupList{ListMeta: obj.(*gloov1.UpstreamGroupList).ListMeta}
for _, item := range obj.(*gloov1.UpstreamGroupList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested upstreamGroups.
func (c *FakeUpstreamGroups) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(upstreamgroupsResource, c.ns, opts))
}
// Create takes the representation of a upstreamGroup and creates it. Returns the server's representation of the upstreamGroup, and an error, if there is any.
func (c *FakeUpstreamGroups) Create(upstreamGroup *gloov1.UpstreamGroup) (result *gloov1.UpstreamGroup, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(upstreamgroupsResource, c.ns, upstreamGroup), &gloov1.UpstreamGroup{})
if obj == nil {
return nil, err
}
return obj.(*gloov1.UpstreamGroup), err
}
// Update takes the representation of a upstreamGroup and updates it. Returns the server's representation of the upstreamGroup, and an error, if there is any.
func (c *FakeUpstreamGroups) Update(upstreamGroup *gloov1.UpstreamGroup) (result *gloov1.UpstreamGroup, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(upstreamgroupsResource, c.ns, upstreamGroup), &gloov1.UpstreamGroup{})
if obj == nil {
return nil, err
}
return obj.(*gloov1.UpstreamGroup), err
}
// Delete takes name of the upstreamGroup and deletes it. Returns an error if one occurs.
func (c *FakeUpstreamGroups) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(upstreamgroupsResource, c.ns, name), &gloov1.UpstreamGroup{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeUpstreamGroups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(upstreamgroupsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &gloov1.UpstreamGroupList{})
return err
}
// Patch applies the patch and returns the patched upstreamGroup.
func (c *FakeUpstreamGroups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *gloov1.UpstreamGroup, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(upstreamgroupsResource, c.ns, name, pt, data, subresources...), &gloov1.UpstreamGroup{})
if obj == nil {
return nil, err
}
return obj.(*gloov1.UpstreamGroup), err
}
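
Note: the fake Gloo client plugs into the existing fake clientset, so unit tests can pre-seed UpstreamGroup objects in memory instead of talking to an API server. A short sketch of that pattern, assuming UpstreamGroup embeds the usual ObjectMeta (names are illustrative):

package main

import (
	"fmt"
	"log"

	gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
	fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Seed the tracker-backed fake clientset with an UpstreamGroup object.
	ug := &gloov1.UpstreamGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "default"},
	}
	client := fakeFlagger.NewSimpleClientset(ug)

	// The fake typed client serves the object back without a real cluster.
	got, err := client.GlooV1().UpstreamGroups("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.Name)
}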

View File

@@ -0,0 +1,21 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
type UpstreamGroupExpansion interface{}

View File

@@ -0,0 +1,89 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
"github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
type GlooV1Interface interface {
RESTClient() rest.Interface
UpstreamGroupsGetter
}
// GlooV1Client is used to interact with features provided by the gloo.solo.io group.
type GlooV1Client struct {
restClient rest.Interface
}
func (c *GlooV1Client) UpstreamGroups(namespace string) UpstreamGroupInterface {
return newUpstreamGroups(c, namespace)
}
// NewForConfig creates a new GlooV1Client for the given config.
func NewForConfig(c *rest.Config) (*GlooV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &GlooV1Client{client}, nil
}
// NewForConfigOrDie creates a new GlooV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *GlooV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new GlooV1Client for the given RESTClient.
func New(c rest.Interface) *GlooV1Client {
return &GlooV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *GlooV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}

View File

@@ -0,0 +1,174 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"time"
v1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// UpstreamGroupsGetter has a method to return a UpstreamGroupInterface.
// A group's client should implement this interface.
type UpstreamGroupsGetter interface {
UpstreamGroups(namespace string) UpstreamGroupInterface
}
// UpstreamGroupInterface has methods to work with UpstreamGroup resources.
type UpstreamGroupInterface interface {
Create(*v1.UpstreamGroup) (*v1.UpstreamGroup, error)
Update(*v1.UpstreamGroup) (*v1.UpstreamGroup, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
Get(name string, options metav1.GetOptions) (*v1.UpstreamGroup, error)
List(opts metav1.ListOptions) (*v1.UpstreamGroupList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.UpstreamGroup, err error)
UpstreamGroupExpansion
}
// upstreamGroups implements UpstreamGroupInterface
type upstreamGroups struct {
client rest.Interface
ns string
}
// newUpstreamGroups returns a UpstreamGroups
func newUpstreamGroups(c *GlooV1Client, namespace string) *upstreamGroups {
return &upstreamGroups{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the upstreamGroup, and returns the corresponding upstreamGroup object, and an error if there is any.
func (c *upstreamGroups) Get(name string, options metav1.GetOptions) (result *v1.UpstreamGroup, err error) {
result = &v1.UpstreamGroup{}
err = c.client.Get().
Namespace(c.ns).
Resource("upstreamgroups").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of UpstreamGroups that match those selectors.
func (c *upstreamGroups) List(opts metav1.ListOptions) (result *v1.UpstreamGroupList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.UpstreamGroupList{}
err = c.client.Get().
Namespace(c.ns).
Resource("upstreamgroups").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested upstreamGroups.
func (c *upstreamGroups) Watch(opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("upstreamgroups").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a upstreamGroup and creates it. Returns the server's representation of the upstreamGroup, and an error, if there is any.
func (c *upstreamGroups) Create(upstreamGroup *v1.UpstreamGroup) (result *v1.UpstreamGroup, err error) {
result = &v1.UpstreamGroup{}
err = c.client.Post().
Namespace(c.ns).
Resource("upstreamgroups").
Body(upstreamGroup).
Do().
Into(result)
return
}
// Update takes the representation of a upstreamGroup and updates it. Returns the server's representation of the upstreamGroup, and an error, if there is any.
func (c *upstreamGroups) Update(upstreamGroup *v1.UpstreamGroup) (result *v1.UpstreamGroup, err error) {
result = &v1.UpstreamGroup{}
err = c.client.Put().
Namespace(c.ns).
Resource("upstreamgroups").
Name(upstreamGroup.Name).
Body(upstreamGroup).
Do().
Into(result)
return
}
// Delete takes name of the upstreamGroup and deletes it. Returns an error if one occurs.
func (c *upstreamGroups) Delete(name string, options *metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("upstreamgroups").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *upstreamGroups) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("upstreamgroups").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched upstreamGroup.
func (c *upstreamGroups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.UpstreamGroup, err error) {
result = &v1.UpstreamGroup{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("upstreamgroups").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}

View File

@@ -26,6 +26,7 @@ import (
versioned "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
appmesh "github.com/weaveworks/flagger/pkg/client/informers/externalversions/appmesh"
flagger "github.com/weaveworks/flagger/pkg/client/informers/externalversions/flagger"
gloo "github.com/weaveworks/flagger/pkg/client/informers/externalversions/gloo"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
istio "github.com/weaveworks/flagger/pkg/client/informers/externalversions/istio"
smi "github.com/weaveworks/flagger/pkg/client/informers/externalversions/smi"
@@ -177,6 +178,7 @@ type SharedInformerFactory interface {
Appmesh() appmesh.Interface
Flagger() flagger.Interface
Gloo() gloo.Interface
Networking() istio.Interface
Split() smi.Interface
}
@@ -189,6 +191,10 @@ func (f *sharedInformerFactory) Flagger() flagger.Interface {
return flagger.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Gloo() gloo.Interface {
return gloo.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Networking() istio.Interface {
return istio.New(f, f.namespace, f.tweakListOptions)
}

View File

@@ -23,6 +23,7 @@ import (
v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
v1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
v1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -67,6 +68,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case v1alpha3.SchemeGroupVersion.WithResource("canaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha3().Canaries().Informer()}, nil
// Group=gloo.solo.io, Version=v1
case v1.SchemeGroupVersion.WithResource("upstreamgroups"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Gloo().V1().UpstreamGroups().Informer()}, nil
// Group=networking.istio.io, Version=v1alpha3
case istiov1alpha3.SchemeGroupVersion.WithResource("destinationrules"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().DestinationRules().Informer()}, nil

View File

@@ -0,0 +1,46 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package gloo
import (
v1 "github.com/weaveworks/flagger/pkg/client/informers/externalversions/gloo/v1"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1 provides access to shared informers for resources in V1.
V1() v1.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
return v1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@@ -0,0 +1,45 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// UpstreamGroups returns a UpstreamGroupInformer.
UpstreamGroups() UpstreamGroupInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// UpstreamGroups returns a UpstreamGroupInformer.
func (v *version) UpstreamGroups() UpstreamGroupInformer {
return &upstreamGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

View File

@@ -0,0 +1,89 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
gloov1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
versioned "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1 "github.com/weaveworks/flagger/pkg/client/listers/gloo/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// UpstreamGroupInformer provides access to a shared informer and lister for
// UpstreamGroups.
type UpstreamGroupInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.UpstreamGroupLister
}
type upstreamGroupInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewUpstreamGroupInformer constructs a new informer for UpstreamGroup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewUpstreamGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredUpstreamGroupInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredUpstreamGroupInformer constructs a new informer for UpstreamGroup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredUpstreamGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.GlooV1().UpstreamGroups(namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.GlooV1().UpstreamGroups(namespace).Watch(options)
},
},
&gloov1.UpstreamGroup{},
resyncPeriod,
indexers,
)
}
func (f *upstreamGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredUpstreamGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *upstreamGroupInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&gloov1.UpstreamGroup{}, f.defaultInformer)
}
func (f *upstreamGroupInformer) Lister() v1.UpstreamGroupLister {
return v1.NewUpstreamGroupLister(f.Informer().GetIndexer())
}

View File

@@ -0,0 +1,27 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
// UpstreamGroupListerExpansion allows custom methods to be added to
// UpstreamGroupLister.
type UpstreamGroupListerExpansion interface{}
// UpstreamGroupNamespaceListerExpansion allows custom methods to be added to
// UpstreamGroupNamespaceLister.
type UpstreamGroupNamespaceListerExpansion interface{}

View File

@@ -0,0 +1,94 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/weaveworks/flagger/pkg/apis/gloo/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// UpstreamGroupLister helps list UpstreamGroups.
type UpstreamGroupLister interface {
// List lists all UpstreamGroups in the indexer.
List(selector labels.Selector) (ret []*v1.UpstreamGroup, err error)
// UpstreamGroups returns an object that can list and get UpstreamGroups.
UpstreamGroups(namespace string) UpstreamGroupNamespaceLister
UpstreamGroupListerExpansion
}
// upstreamGroupLister implements the UpstreamGroupLister interface.
type upstreamGroupLister struct {
indexer cache.Indexer
}
// NewUpstreamGroupLister returns a new UpstreamGroupLister.
func NewUpstreamGroupLister(indexer cache.Indexer) UpstreamGroupLister {
return &upstreamGroupLister{indexer: indexer}
}
// List lists all UpstreamGroups in the indexer.
func (s *upstreamGroupLister) List(selector labels.Selector) (ret []*v1.UpstreamGroup, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.UpstreamGroup))
})
return ret, err
}
// UpstreamGroups returns an object that can list and get UpstreamGroups.
func (s *upstreamGroupLister) UpstreamGroups(namespace string) UpstreamGroupNamespaceLister {
return upstreamGroupNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// UpstreamGroupNamespaceLister helps list and get UpstreamGroups.
type UpstreamGroupNamespaceLister interface {
// List lists all UpstreamGroups in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1.UpstreamGroup, err error)
// Get retrieves the UpstreamGroup from the indexer for a given namespace and name.
Get(name string) (*v1.UpstreamGroup, error)
UpstreamGroupNamespaceListerExpansion
}
// upstreamGroupNamespaceLister implements the UpstreamGroupNamespaceLister
// interface.
type upstreamGroupNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all UpstreamGroups in the indexer for a given namespace.
func (s upstreamGroupNamespaceLister) List(selector labels.Selector) (ret []*v1.UpstreamGroup, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1.UpstreamGroup))
})
return ret, err
}
// Get retrieves the UpstreamGroup from the indexer for a given namespace and name.
func (s upstreamGroupNamespaceLister) Get(name string) (*v1.UpstreamGroup, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("upstreamgroup"), name)
}
return obj.(*v1.UpstreamGroup), nil
}
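
Note: the informer and lister generated here are reachable through the shared informer factory as Gloo().V1().UpstreamGroups(). A hedged end-to-end sketch — the kubeconfig path, namespace and resync period are arbitrary choices for illustration:

package main

import (
	"fmt"
	"time"

	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The shared factory now exposes Gloo() next to Appmesh(), Flagger(),
	// Networking() and Split().
	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	upstreamGroups := factory.Gloo().V1().UpstreamGroups()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Read from the informer's local cache through the generated lister.
	list, err := upstreamGroups.Lister().UpstreamGroups("test").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, ug := range list {
		fmt.Println(ug.Name)
	}
}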

View File

@@ -5,16 +5,6 @@ import (
"sync"
"time"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
flaggerscheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
flaggerinformers "github.com/weaveworks/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
flaggerlisters "github.com/weaveworks/flagger/pkg/client/listers/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -28,6 +18,16 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
flaggerscheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
flaggerinformers "github.com/weaveworks/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
flaggerlisters "github.com/weaveworks/flagger/pkg/client/listers/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
)
const controllerAgentName = "flagger"

View File

@@ -4,16 +4,6 @@ import (
"sync"
"time"
"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
istiov1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/router"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v1"
@@ -24,6 +14,17 @@ import (
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
istiov1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/router"
)
var (
@@ -32,7 +33,7 @@ var (
)
type Mocks struct {
canary *v1alpha3.Canary
canary *flaggerv1.Canary
kubeClient kubernetes.Interface
meshClient clientset.Interface
flaggerClient clientset.Interface
@@ -42,11 +43,9 @@ type Mocks struct {
router router.Interface
}
func SetupMocks(abtest bool) Mocks {
// init canary
c := newTestCanary()
if abtest {
c = newTestCanaryAB()
func SetupMocks(c *flaggerv1.Canary) Mocks {
if c == nil {
c = newTestCanary()
}
flaggerClient := fakeFlagger.NewSimpleClientset(c)
@@ -228,14 +227,14 @@ func NewTestSecretVol() *corev1.Secret {
}
}
func newTestCanary() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
func newTestCanary() *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha3.CanarySpec{
Spec: flaggerv1.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
@@ -245,13 +244,13 @@ func newTestCanary() *v1alpha3.Canary {
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha3.CanaryService{
}, Service: flaggerv1.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
}, CanaryAnalysis: flaggerv1.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha3.CanaryMetric{
Metrics: []flaggerv1.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
@@ -269,14 +268,20 @@ func newTestCanary() *v1alpha3.Canary {
return cd
}
func newTestCanaryAB() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
func newTestCanaryMirror() *flaggerv1.Canary {
cd := newTestCanary()
cd.Spec.CanaryAnalysis.Mirror = true
return cd
}
func newTestCanaryAB() *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha3.CanarySpec{
Spec: flaggerv1.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
@@ -286,9 +291,9 @@ func newTestCanaryAB() *v1alpha3.Canary {
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha3.CanaryService{
}, Service: flaggerv1.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
}, CanaryAnalysis: flaggerv1.CanaryAnalysis{
Threshold: 10,
Iterations: 10,
Match: []istiov1alpha3.HTTPMatchRequest{
@@ -300,7 +305,7 @@ func newTestCanaryAB() *v1alpha3.Canary {
},
},
},
Metrics: []v1alpha3.CanaryMetric{
Metrics: []flaggerv1.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,

View File

@@ -2,13 +2,14 @@ package controller
import (
"fmt"
"github.com/weaveworks/flagger/pkg/metrics"
"strings"
"time"
"github.com/weaveworks/flagger/pkg/router"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/weaveworks/flagger/pkg/router"
)
// scheduleCanaries synchronises the canary map with the jobs map,
@@ -81,7 +82,7 @@ func (c *Controller) scheduleCanaries() {
func (c *Controller) advanceCanary(name string, namespace string, skipLivenessChecks bool) {
begin := time.Now()
// check if the canary exists
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(namespace).Get(name, v1.GetOptions{})
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(namespace).Get(name, metav1.GetOptions{})
if err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
Errorf("Canary %s.%s not found", name, namespace)
@@ -102,7 +103,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
if skipLivenessChecks || strings.Contains(provider, "istio") || strings.Contains(provider, "appmesh") {
skipPrimaryCheck = true
}
label, ports, err := c.deployer.Initialize(cd, skipPrimaryCheck)
labelSelector, ports, err := c.deployer.Initialize(cd, skipPrimaryCheck)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@@ -112,7 +113,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
meshRouter := c.routerFactory.MeshRouter(provider)
// create or update ClusterIP services
if err := c.routerFactory.KubernetesRouter(label, ports).Reconcile(cd); err != nil {
if err := c.routerFactory.KubernetesRouter(labelSelector, map[string]string{}, ports).Reconcile(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -123,6 +124,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}
// check for deployment spec or configs changes
shouldAdvance, err := c.shouldAdvance(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
@@ -153,9 +155,8 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}
}
// check if virtual service exists
// and if it contains weighted destination routes to the primary and canary services
primaryWeight, canaryWeight, err := meshRouter.GetRoutes(cd)
// get the routing settings
primaryWeight, canaryWeight, mirrored, err := meshRouter.GetRoutes(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@@ -176,7 +177,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
// route all traffic back to primary
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -218,7 +219,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
if cd.Status.Phase == flaggerv1.CanaryPhasePromoting {
if provider != "kubernetes" {
c.recordEventInfof(cd, "Routing all traffic to primary")
if err := meshRouter.SetRoutes(cd, 100, 0); err != nil {
if err := meshRouter.SetRoutes(cd, 100, 0, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -275,7 +276,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
// route all traffic back to primary
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -302,8 +303,9 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}
// check if the canary success rate is above the threshold
// skip check if no traffic is routed to canary
if canaryWeight == 0 && cd.Status.Iterations == 0 {
// skip check if no traffic is routed or mirrored to canary
if canaryWeight == 0 && cd.Status.Iterations == 0 &&
(cd.Spec.CanaryAnalysis.Mirror == false || mirrored == false) {
c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
// run pre-rollout web hooks
@@ -324,11 +326,24 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}
}
// use blue/green strategy for kubernetes provider
if provider == "kubernetes" {
if len(cd.Spec.CanaryAnalysis.Match) > 0 {
c.recordEventWarningf(cd, "A/B testing is not supported when using the kubernetes provider")
cd.Spec.CanaryAnalysis.Match = nil
}
if cd.Spec.CanaryAnalysis.Iterations < 1 {
c.recordEventWarningf(cd, "Progressive traffic is not supported when using the kubernetes provider")
c.recordEventWarningf(cd, "Setting canaryAnalysis.iterations: 10")
cd.Spec.CanaryAnalysis.Iterations = 10
}
}
// strategy: A/B testing
if len(cd.Spec.CanaryAnalysis.Match) > 0 && cd.Spec.CanaryAnalysis.Iterations > 0 {
// route traffic to canary and increment iterations
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
if err := meshRouter.SetRoutes(cd, 0, 100); err != nil {
if err := meshRouter.SetRoutes(cd, 0, 100, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -372,6 +387,15 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
if cd.Spec.CanaryAnalysis.Iterations > 0 {
// increment iterations
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
// If in "mirror" mode, mirror requests during the entire B/G canary test
if provider != "kubernetes" &&
cd.Spec.CanaryAnalysis.Mirror == true && mirrored == false {
if err := meshRouter.SetRoutes(cd, 100, 0, true); err != nil {
c.recordEventWarningf(cd, "%v", err)
}
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
Infof("Start traffic mirroring")
}
if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@@ -389,8 +413,12 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
// route all traffic to canary - max iterations reached
if cd.Spec.CanaryAnalysis.Iterations == cd.Status.Iterations {
if provider != "kubernetes" {
c.recordEventInfof(cd, "Routing all traffic to canary")
if err := meshRouter.SetRoutes(cd, 0, 100); err != nil {
if cd.Spec.CanaryAnalysis.Mirror {
c.recordEventInfof(cd, "Stop traffic mirroring and route all traffic to canary")
} else {
c.recordEventInfof(cd, "Routing all traffic to canary")
}
if err := meshRouter.SetRoutes(cd, 0, 100, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -429,16 +457,34 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
if cd.Spec.CanaryAnalysis.StepWeight > 0 {
// increase traffic weight
if canaryWeight < maxWeight {
primaryWeight -= cd.Spec.CanaryAnalysis.StepWeight
if primaryWeight < 0 {
primaryWeight = 0
}
canaryWeight += cd.Spec.CanaryAnalysis.StepWeight
if canaryWeight > 100 {
canaryWeight = 100
// If in "mirror" mode, do one step of mirroring before shifting traffic to canary.
// When mirroring, all requests go to primary and canary, but only responses from
// primary go back to the user.
if cd.Spec.CanaryAnalysis.Mirror && canaryWeight == 0 {
if mirrored == false {
mirrored = true
primaryWeight = 100
canaryWeight = 0
} else {
mirrored = false
primaryWeight = 100 - cd.Spec.CanaryAnalysis.StepWeight
canaryWeight = cd.Spec.CanaryAnalysis.StepWeight
}
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
Infof("Running mirror step %d/%d/%t", primaryWeight, canaryWeight, mirrored)
} else {
primaryWeight -= cd.Spec.CanaryAnalysis.StepWeight
if primaryWeight < 0 {
primaryWeight = 0
}
canaryWeight += cd.Spec.CanaryAnalysis.StepWeight
if canaryWeight > 100 {
canaryWeight = 100
}
}
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, mirrored); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -489,7 +535,7 @@ func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.
// route all traffic to primary
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return false
}
@@ -576,7 +622,7 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool)
c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "New revision detected, starting canary analysis.",
true, false)
if err := c.deployer.Scale(cd, 1); err != nil {
if err := c.deployer.ScaleUp(cd); err != nil {
c.recordEventErrorf(cd, "%v", err)
return false
}
@@ -704,7 +750,20 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
}
// create observer based on the mesh provider
observer := c.observerFactory.Observer(metricsProvider)
observerFactory := c.observerFactory
// override the global metrics server if one is specified in the canary spec
metricsServer := c.observerFactory.Client.GetMetricsServer()
if r.Spec.MetricsServer != "" {
metricsServer = r.Spec.MetricsServer
var err error
observerFactory, err = metrics.NewFactory(metricsServer, metricsProvider, 5*time.Second)
if err != nil {
c.recordEventErrorf(r, "Error building Prometheus client for %s %v", r.Spec.MetricsServer, err)
return false
}
}
observer := observerFactory.Observer(metricsProvider)
// run metrics checks
for _, metric := range r.Spec.CanaryAnalysis.Metrics {
@@ -719,7 +778,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observerFactory.Client.GetMetricsServer(), err)
c.recordEventErrorf(r, "Metrics server %s query failed: %v", metricsServer, err)
}
return false
}
@@ -739,7 +798,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observerFactory.Client.GetMetricsServer(), err)
c.recordEventErrorf(r, "Metrics server %s query failed: %v", metricsServer, err)
}
return false
}
@@ -755,13 +814,13 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
// custom checks
if metric.Query != "" {
val, err := c.observerFactory.Client.RunQuery(metric.Query)
val, err := observerFactory.Client.RunQuery(metric.Query)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
c.recordEventWarningf(r, "Halt advancement no values found for custom metric: %s",
metric.Name)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed for %s: %v", c.observerFactory.Client.GetMetricsServer(), metric.Name, err)
c.recordEventErrorf(r, "Metrics server %s query failed for %s: %v", metricsServer, metric.Name, err)
}
return false
}
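The analyseCanary changes above add a per-canary metrics server: when spec.metricsServer is set, a dedicated Prometheus client is built for that address with a 5-second timeout, otherwise the controller-wide server is reused, and error events now report whichever address was actually queried. A trivial, standalone sketch of the selection rule (helper name and addresses are made up):

package main

import "fmt"

// pickMetricsServer mirrors the override above: a per-canary address wins over the global one.
func pickMetricsServer(globalServer, canaryServer string) string {
	if canaryServer != "" {
		return canaryServer
	}
	return globalServer
}

func main() {
	fmt.Println(pickMetricsServer("http://prometheus:9090", ""))                           // http://prometheus:9090
	fmt.Println(pickMetricsServer("http://prometheus:9090", "http://thanos.example:9090")) // http://thanos.example:9090
}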

View File

@@ -2,13 +2,16 @@ package controller
import (
"fmt"
"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
func TestScheduler_Init(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
mocks.ctrl.advanceCanary("podinfo", "default", true)
_, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
@@ -18,7 +21,7 @@ func TestScheduler_Init(t *testing.T) {
}
func TestScheduler_NewRevision(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
mocks.ctrl.advanceCanary("podinfo", "default", true)
// update
@@ -42,12 +45,12 @@ func TestScheduler_NewRevision(t *testing.T) {
}
func TestScheduler_Rollback(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
// update failed checks to max
err := mocks.deployer.SyncStatus(mocks.canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryPhaseProgressing, FailedChecks: 11})
err := mocks.deployer.SyncStatus(mocks.canary, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing, FailedChecks: 11})
if err != nil {
t.Fatal(err.Error())
}
@@ -60,13 +63,13 @@ func TestScheduler_Rollback(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseFailed {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFailed)
if c.Status.Phase != flaggerv1.CanaryPhaseFailed {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFailed)
}
}
func TestScheduler_SkipAnalysis(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
@@ -101,13 +104,13 @@ func TestScheduler_SkipAnalysis(t *testing.T) {
t.Errorf("Got skip analysis %v wanted %v", c.Spec.SkipAnalysis, true)
}
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded)
}
}
func TestScheduler_NewRevisionReset(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
@@ -123,7 +126,7 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryWeight, canaryWeight, err := mocks.router.GetRoutes(mocks.canary)
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -136,6 +139,10 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 10)
}
if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}
// second update
dep2.Spec.Template.Spec.ServiceAccountName = "test"
_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
@@ -146,7 +153,7 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
// detect changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryWeight, canaryWeight, err = mocks.router.GetRoutes(mocks.canary)
primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -158,10 +165,14 @@ func TestScheduler_NewRevisionReset(t *testing.T) {
if canaryWeight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 0)
}
if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}
}
func TestScheduler_Promotion(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
@@ -172,8 +183,8 @@ func TestScheduler_Promotion(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseInitialized {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseInitialized)
if c.Status.Phase != flaggerv1.CanaryPhaseInitialized {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseInitialized)
}
// update
@@ -201,14 +212,14 @@ func TestScheduler_Promotion(t *testing.T) {
// detect configs changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryWeight, canaryWeight, err := mocks.router.GetRoutes(mocks.canary)
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
primaryWeight = 60
canaryWeight = 40
err = mocks.router.SetRoutes(mocks.canary, primaryWeight, canaryWeight)
err = mocks.router.SetRoutes(mocks.canary, primaryWeight, canaryWeight, mirrored)
if err != nil {
t.Fatal(err.Error())
}
@@ -222,8 +233,8 @@ func TestScheduler_Promotion(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseProgressing {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseProgressing)
if c.Status.Phase != flaggerv1.CanaryPhaseProgressing {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseProgressing)
}
// promote
@@ -235,14 +246,14 @@ func TestScheduler_Promotion(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhasePromoting {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhasePromoting)
if c.Status.Phase != flaggerv1.CanaryPhasePromoting {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhasePromoting)
}
// finalise
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryWeight, canaryWeight, err = mocks.router.GetRoutes(mocks.canary)
primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -255,6 +266,10 @@ func TestScheduler_Promotion(t *testing.T) {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 0)
}
if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}
primaryDep, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
@@ -290,8 +305,8 @@ func TestScheduler_Promotion(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFinalising)
if c.Status.Phase != flaggerv1.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising)
}
// scale canary to zero
@@ -302,13 +317,71 @@ func TestScheduler_Promotion(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded)
}
}
func TestScheduler_Mirroring(t *testing.T) {
mocks := SetupMocks(newTestCanaryMirror())
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
// update
dep2 := newTestDeploymentV2()
_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect pod spec changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
// check if traffic is mirrored to canary
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryWeight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryWeight, 100)
}
if canaryWeight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 0)
}
if mirrored != true {
t.Errorf("Got mirrored %v wanted %v", mirrored, true)
}
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
// check if traffic is mirrored to canary
primaryWeight, canaryWeight, mirrored, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryWeight != 90 {
t.Errorf("Got primary route %v wanted %v", primaryWeight, 90)
}
if canaryWeight != 10 {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 10)
}
if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}
}
func TestScheduler_ABTesting(t *testing.T) {
mocks := SetupMocks(true)
mocks := SetupMocks(newTestCanaryAB())
// init
mocks.ctrl.advanceCanary("podinfo", "default", true)
@@ -326,7 +399,7 @@ func TestScheduler_ABTesting(t *testing.T) {
mocks.ctrl.advanceCanary("podinfo", "default", true)
// check if traffic is routed to canary
primaryWeight, canaryWeight, err := mocks.router.GetRoutes(mocks.canary)
primaryWeight, canaryWeight, mirrored, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -339,6 +412,10 @@ func TestScheduler_ABTesting(t *testing.T) {
t.Errorf("Got canary route %v wanted %v", canaryWeight, 100)
}
if mirrored != false {
t.Errorf("Got mirrored %v wanted %v", mirrored, false)
}
cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
@@ -361,8 +438,8 @@ func TestScheduler_ABTesting(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFinalising)
if c.Status.Phase != flaggerv1.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseFinalising)
}
// check if the container image tag was updated
@@ -386,13 +463,13 @@ func TestScheduler_ABTesting(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
if c.Status.Phase != flaggerv1.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, flaggerv1.CanaryPhaseSucceeded)
}
}
func TestScheduler_PortDiscovery(t *testing.T) {
mocks := SetupMocks(false)
mocks := SetupMocks(nil)
// enable port discovery
cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
@@ -434,3 +511,93 @@ func TestScheduler_PortDiscovery(t *testing.T) {
}
}
func TestScheduler_TargetPortNumber(t *testing.T) {
mocks := SetupMocks(nil)
cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
cd.Spec.Service.Port = 80
cd.Spec.Service.TargetPort = intstr.FromInt(9898)
cd.Spec.Service.PortDiscovery = true
_, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Update(cd)
if err != nil {
t.Fatal(err.Error())
}
mocks.ctrl.advanceCanary("podinfo", "default", true)
canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if len(canarySvc.Spec.Ports) != 3 {
t.Fatalf("Got svc port count %v wanted %v", len(canarySvc.Spec.Ports), 3)
}
matchPorts := func(lookup string) bool {
switch lookup {
case
"http 80",
"http-metrics 8080",
"tcp-podinfo-2 8888":
return true
}
return false
}
for _, port := range canarySvc.Spec.Ports {
if !matchPorts(fmt.Sprintf("%s %v", port.Name, port.Port)) {
t.Fatalf("Got wrong svc port %v", port.Name)
}
}
}
func TestScheduler_TargetPortName(t *testing.T) {
mocks := SetupMocks(nil)
cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
cd.Spec.Service.Port = 8080
cd.Spec.Service.TargetPort = intstr.FromString("http")
cd.Spec.Service.PortDiscovery = true
_, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Update(cd)
if err != nil {
t.Fatal(err.Error())
}
mocks.ctrl.advanceCanary("podinfo", "default", true)
canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if len(canarySvc.Spec.Ports) != 3 {
t.Fatalf("Got svc port count %v wanted %v", len(canarySvc.Spec.Ports), 3)
}
matchPorts := func(lookup string) bool {
switch lookup {
case
"http 8080",
"http-metrics 8080",
"tcp-podinfo-2 8888":
return true
}
return false
}
for _, port := range canarySvc.Spec.Ports {
if !matchPorts(fmt.Sprintf("%s %v", port.Name, port.Port)) {
t.Fatalf("Got wrong svc port %v", port.Name)
}
}
}

View File

@@ -6,11 +6,12 @@ import (
"encoding/json"
"errors"
"fmt"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"io/ioutil"
"net/http"
"net/url"
"time"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
// CallWebhook does a HTTP POST to an external service and

View File

@@ -1,10 +1,11 @@
package controller
import (
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"net/http"
"net/http/httptest"
"testing"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)
func TestCallWebhook(t *testing.T) {

View File

@@ -10,8 +10,8 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"text/template"
"time"
)
@@ -137,13 +137,10 @@ func (p *PrometheusClient) RunQuery(query string) (float64, error) {
return *value, nil
}
// TrimQuery takes a promql query and removes spaces, tabs and new lines
// TrimQuery takes a promql query and removes whitespace
func (p *PrometheusClient) TrimQuery(query string) string {
query = strings.Replace(query, "\n", "", -1)
query = strings.Replace(query, "\t", "", -1)
query = strings.Replace(query, " ", "", -1)
return query
space := regexp.MustCompile(`\s+`)
return space.ReplaceAllString(query, " ")
}
// IsOnline call Prometheus status endpoint and returns an error if the API is unreachable
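The new TrimQuery no longer deletes every space; it collapses any run of spaces, tabs and newlines into a single space, which is why the expected queries in the observer tests below contain single spaces (including a leading one when the query starts on a new line). A standalone sketch of the same normalization:

package main

import (
	"fmt"
	"regexp"
)

// trimQuery collapses whitespace runs (spaces, tabs, newlines) into single spaces,
// matching the regexp-based TrimQuery above; leading and trailing whitespace becomes
// a single space rather than being removed.
func trimQuery(query string) string {
	space := regexp.MustCompile(`\s+`)
	return space.ReplaceAllString(query, " ")
}

func main() {
	q := `
	sum(
		rate(http_requests_total{status!~"5.*"}[1m])
	)`
	fmt.Println(trimQuery(q))
	// prints: " sum( rate(http_requests_total{status!~"5.*"}[1m]) )" (note the leading space)
}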

View File

@@ -8,7 +8,7 @@ import (
)
func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",envoy_response_code!~"5.*"}[1m]))/sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))*100`
expected := ` sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestEnvoyObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))by(le))`
expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

View File

@@ -8,7 +8,7 @@ import (
)
func TestGlooObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(envoy_cluster_upstream_rq{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",envoy_response_code!~"5.*"}[1m]))/sum(rate(envoy_cluster_upstream_rq{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",}[1m]))*100`
expected := ` sum( rate( envoy_cluster_upstream_rq{ envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+", envoy_response_code!~"5.*" }[1m] ) ) / sum( rate( envoy_cluster_upstream_rq{ envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+", }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestGlooObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestGlooObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(envoy_cluster_upstream_rq_time_bucket{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",}[1m]))by(le))`
expected := ` histogram_quantile( 0.99, sum( rate( envoy_cluster_upstream_rq_time_bucket{ envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+", }[1m] ) ) by (le) )`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

View File

@@ -8,7 +8,7 @@ import (
)
func TestHttpObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(http_request_duration_seconds_count{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",status!~"5.*"}[1m]))/sum(rate(http_request_duration_seconds_count{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))*100`
expected := ` sum( rate( http_request_duration_seconds_count{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)", status!~"5.*" }[1m] ) ) / sum( rate( http_request_duration_seconds_count{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestHttpObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestHttpObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(http_request_duration_seconds_bucket{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))by(le))`
expected := ` histogram_quantile( 0.99, sum( rate( http_request_duration_seconds_bucket{ kubernetes_namespace="default", kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)" }[1m] ) ) by (le) )`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

View File

@@ -8,7 +8,7 @@ import (
)
func TestIstioObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo",response_code!~"5.*"}[1m]))/sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m]))*100`
expected := ` sum( rate( istio_requests_total{ reporter="destination", destination_workload_namespace="default", destination_workload=~"podinfo", response_code!~"5.*" }[1m] ) ) / sum( rate( istio_requests_total{ reporter="destination", destination_workload_namespace="default", destination_workload=~"podinfo" }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestIstioObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestIstioObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(istio_request_duration_seconds_bucket{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m]))by(le))`
expected := ` histogram_quantile( 0.99, sum( rate( istio_request_duration_seconds_bucket{ reporter="destination", destination_workload_namespace="default", destination_workload=~"podinfo" }[1m] ) ) by (le) )`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

View File

@@ -8,7 +8,7 @@ import (
)
func TestLinkerdObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(response_total{namespace="default",deployment=~"podinfo",classification!="failure",direction="inbound"}[1m]))/sum(rate(response_total{namespace="default",deployment=~"podinfo",direction="inbound"}[1m]))*100`
expected := ` sum( rate( response_total{ namespace="default", deployment=~"podinfo", classification!="failure", direction="inbound" }[1m] ) ) / sum( rate( response_total{ namespace="default", deployment=~"podinfo", direction="inbound" }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestLinkerdObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestLinkerdObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(response_latency_ms_bucket{namespace="default",deployment=~"podinfo",direction="inbound"}[1m]))by(le))`
expected := ` histogram_quantile( 0.99, sum( rate( response_latency_ms_bucket{ namespace="default", deployment=~"podinfo", direction="inbound" }[1m] ) ) by (le) )`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

View File

@@ -8,7 +8,7 @@ import (
)
func TestNginxObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m]))/sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m]))*100`
expected := ` sum( rate( nginx_ingress_controller_requests{ namespace="nginx", ingress="podinfo", status!~"5.*" }[1m] ) ) / sum( rate( nginx_ingress_controller_requests{ namespace="nginx", ingress="podinfo" }[1m] ) ) * 100`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
@@ -41,7 +41,7 @@ func TestNginxObserver_GetRequestSuccessRate(t *testing.T) {
}
func TestNginxObserver_GetRequestDuration(t *testing.T) {
expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m]))/sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m]))*1000`
expected := ` sum( rate( nginx_ingress_controller_ingress_upstream_latency_seconds_sum{ namespace="nginx", ingress="podinfo" }[1m] ) ) / sum( rate( nginx_ingress_controller_ingress_upstream_latency_seconds_count{ namespace="nginx", ingress="podinfo" }[1m] ) ) * 1000`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]

Some files were not shown because too many files have changed in this diff.