Compare commits


41 Commits
0.2.0 ... 0.4.0

Author SHA1 Message Date
stefanprodan
c46fe55ad0 Release v0.4.0 2019-01-18 12:49:36 +02:00
Stefan Prodan
36a54fbf2a Merge pull request #31 from stefanprodan/reset
Restart analysis if revision changes during validation
2019-01-18 10:25:38 +01:00
stefanprodan
60f6b05397 Refactor scheduler tests 2019-01-18 11:14:27 +02:00
stefanprodan
6d8a7343b7 Add tests for analysis restart and canary promotion 2019-01-18 11:05:40 +02:00
stefanprodan
aff8b117d4 Restart validation if revision changes during analysis 2019-01-17 15:13:59 +02:00
Stefan Prodan
1b3c3b22b3 Merge pull request #29 from stefanprodan/status
Use Kubernetes 1.11 CRD status sub-resource
2019-01-17 13:06:28 +01:00
stefanprodan
1d31b5ed90 Add canary name and namespace to controller logs
- zap key-value: canary=name.namespace
2019-01-17 13:58:10 +02:00
stefanprodan
1ef310f00d Add traffic weight to canary status
- show current weight on kubectl get canaries and kubectl get all
2019-01-16 16:29:59 +02:00
stefanprodan
acdd2c46d5 Refactor Canary status
- add status phases (Initialized, Progressing, Succeeded, Failed)
- rename status revision to LastAppliedSpec
2019-01-16 15:06:38 +02:00
stefanprodan
9872e6bc16 Skip readiness checks if canary analysis finished 2019-01-16 13:18:53 +02:00
stefanprodan
10c2bdec86 Use deep copy when updating the virtual service routes 2019-01-16 13:13:07 +02:00
stefanprodan
4bf3b70048 Use CRD UpdateStatus for Canary status updated
- requires Kubernetes >=1.11
2019-01-16 01:00:39 +02:00
stefanprodan
ada446bbaa Drop compatibility with Kubernetes 1.10 2019-01-16 00:58:51 +02:00
stefanprodan
c4981ef4db Add status and additional printer columns to CRD 2019-01-16 00:57:46 +02:00
Stefan Prodan
d1b84cd31d Merge pull request #28 from stefanprodan/naming
Fix for when canary name is different to the target name
2019-01-15 23:32:41 +01:00
stefanprodan
9232c8647a Check if multiple canaries have the same target
- log an error on target duplication ref #13
2019-01-15 21:43:05 +02:00
stefanprodan
23e8c7d616 Fix for when canary name is different to the target name
- use target name consistent at bootstrap
2019-01-15 21:18:46 +02:00
Stefan Prodan
42607fbd64 Merge pull request #26 from carlossg/service-name
Fix VirtualService routes
2019-01-15 19:38:38 +01:00
stefanprodan
28781a5f02 Use deep copy when updating the deployment object
- fix canary status update logs
2019-01-15 20:37:14 +02:00
stefanprodan
3589e11244 Bump dev version 2019-01-15 20:36:59 +02:00
Carlos Sanchez
5e880d3942 Wrong VirtualService routes
If deployment name is different from canary name
the virtual service routes are created with canary name
but the services are created with deployment name

Note that canary name should match deployment name
2019-01-15 18:44:50 +01:00
stefanprodan
f7e675144d Release v0.3.0 2019-01-11 20:10:41 +02:00
Stefan Prodan
3bff2c339b Merge pull request #20 from stefanprodan/scheduler
Add canary analysis schedule interval to CRD
2019-01-11 19:06:17 +01:00
Stefan Prodan
b035c1e7fb Merge pull request #25 from carlossg/virtualservice-naming
Tries to create VirtualService that already exists
2019-01-11 18:03:57 +01:00
Carlos Sanchez
7ae0d49e80 Tries to create VirtualService that already exists
When canary name is different than deployment name

VirtualService croc-hunter-jenkinsx.jx-staging create error virtualservices.networking.istio.io "croc-hunter-jenkinsx" already exists
2019-01-11 17:47:52 +01:00
Stefan Prodan
07f66e849d Merge branch 'master' into scheduler 2019-01-11 15:07:03 +01:00
Stefan Prodan
06c29051eb Merge pull request #24 from carlossg/log-fix
Fix bad error message
2019-01-11 15:05:37 +01:00
stefanprodan
83118faeb3 Fix autoscalerRef tests 2019-01-11 13:51:44 +02:00
stefanprodan
aa2c28c733 Make autoscalerRef optional
- use anyOf as a workaround for the openAPI object validation not accepting empty values
- fix #23
2019-01-11 13:42:32 +02:00
stefanprodan
10185407f6 Use httpbin.org for webhook testing 2019-01-11 13:12:53 +02:00
Carlos Sanchez
c1bde57c17 Fix bad error message
"controller/scheduler.go:217","msg":"deployment . update error Canary.flagger.app \"jx-staging-croc-hunter-jenkinsx\" is invalid: []: Invalid value: map[string]interface {}{\"metadata\":map[string]interface {}{\"name\":\"jx-staging-croc-hunter-jenkinsx\", \"namespace\":\"jx-staging\", \"selfLink\":\"/apis/flagger.app/v1alpha2/namespaces/jx-staging/canaries/jx-staging-croc-hunter-jenkinsx\", \"uid\":\"b248877e-1406-11e9-bf64-42010a8000c6\", \"resourceVersion\":\"30650895\", \"generation\":1, \"creationTimestamp\":\"2019-01-09T12:04:20Z\"}, \"spec\":map[string]interface {}{\"canaryAnalysis\":map[string]interface {}{\"threshold\":5, \"maxWeight\":50, \"stepWeight\":10, \"metrics\":[]interface {}{map[string]interface {}{\"name\":\"istio_requests_total\", \"interval\":\"1m\", \"threshold\":99}, map[string]interface {}{\"name\":\"istio_request_duration_seconds_bucket\", \"interval\":\"30s\"istio-system/flagger-b486d78c8-fkmbr[flagger]: {"level":"info","ts":"2019-01-09T12:14:05.158Z","caller":"controller/deployer.go:228","msg":"Scaling down jx-staging-croc-hunter-jenkinsx.jx-staging"}
2019-01-09 13:17:17 +01:00
stefanprodan
882b4b2d23 Update the control loop interval flag description 2019-01-08 13:15:10 +02:00
Stefan Prodan
cac585157f Merge pull request #21 from carlossg/patch-1
Qualify letsencrypt api version
2019-01-07 15:45:07 +02:00
Carlos Sanchez
cc2860a49f Qualify letsencrypt api version
Otherwise getting

    error: unable to recognize "./letsencrypt-issuer.yaml": no matches for kind "Issuer" in version "v1alpha2"
2019-01-07 14:38:53 +01:00
stefanprodan
bec96356ec Bump CRD version to v1alpha3
- new field canaryAnalysis.interval
2019-01-07 01:03:31 +02:00
stefanprodan
b5c648ea54 Bump version to 0.3.0-beta.1 2019-01-07 00:30:09 +02:00
stefanprodan
e6e3e500be Schedule canary analysis based on interval 2019-01-07 00:26:01 +02:00
stefanprodan
537e8fdaf7 Add canary analysis interval to CRD 2019-01-07 00:24:43 +02:00
stefanprodan
322c83bdad Add docs site link to chart 2019-01-06 18:18:47 +02:00
stefanprodan
41f0ba0247 Document the CRD target ref and control loop interval 2019-01-05 10:22:00 +02:00
stefanprodan
b67b49fde6 Change the default analysis interval to 1m 2019-01-05 01:05:27 +02:00
57 changed files with 997 additions and 473 deletions

View File

@@ -8,8 +8,8 @@
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests, load tests or any other custom
validation.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
### Install
@@ -25,11 +25,10 @@ helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Usage
@@ -75,7 +74,7 @@ You can change the canary analysis _max weight_ and the _step weight_ percentage
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha2
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
@@ -102,8 +101,10 @@ spec:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
- podinfo.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
@@ -241,15 +242,16 @@ kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
Flagger detects that the deployment revision changed and starts a new canary analysis:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Canary Weight: 0
Failed Checks: 0
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -271,6 +273,15 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 5 2019-01-16T14:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
@@ -299,9 +310,10 @@ the canary is scaled to zero and the rollout is marked as failed.
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Canary Weight: 0
Failed Checks: 10
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------

View File

@@ -1,4 +1,4 @@
apiVersion: flagger.app/v1alpha2
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
@@ -27,6 +27,8 @@ spec:
hosts:
- app.iowa.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
@@ -50,7 +52,7 @@ spec:
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
url: https://httpbin.org/post
timeout: 1m
metadata:
test: "all"

View File

@@ -4,11 +4,14 @@ metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1alpha2
version: v1alpha3
versions:
- name: v1alpha2
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
@@ -16,7 +19,21 @@ spec:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
@@ -39,7 +56,9 @@ spec:
name:
type: string
autoscalerRef:
type: object
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
@@ -56,6 +75,9 @@ spec:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
@@ -73,7 +95,7 @@ spec:
type: string
interval:
type: string
pattern: "^[0-9]+(m)"
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
@@ -90,4 +112,4 @@ spec:
format: url
timeout:
type: string
pattern: "^[0-9]+(s)"
pattern: "^[0-9]+(m|s)"
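
The relaxed pattern means the analysis interval, metric interval and webhook timeout fields now accept seconds as well as minutes. A minimal Go sketch (illustrative only, not code from this change set) of what `^[0-9]+(m|s)` matches:

```go
package main

import (
	"fmt"
	"regexp"
)

// Mirrors the openAPIV3Schema pattern used for the canaryAnalysis interval,
// metric interval and webhook timeout fields in the Canary CRD.
var durationPattern = regexp.MustCompile(`^[0-9]+(m|s)`)

func main() {
	for _, v := range []string{"1m", "30s", "10", "1h"} {
		fmt.Printf("%-4s matches: %v\n", v, durationPattern.MatchString(v))
	}
	// Output:
	// 1m   matches: true
	// 30s  matches: true
	// 10   matches: false
	// 1h   matches: false
}
```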

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.2.0
image: quay.io/stefanprodan/flagger:0.4.0
imagePullPolicy: Always
ports:
- name: http

View File

@@ -1,11 +1,11 @@
apiVersion: v1
name: flagger
version: 0.2.0
appVersion: 0.2.0
kubeVersion: ">=1.9.0-0"
version: 0.4.0
appVersion: 0.4.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://flagger.app
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger

View File

@@ -1,14 +1,14 @@
# Flagger
[Flagger](https://flagger.app) is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack.
## Prerequisites
* Kubernetes >= 1.9
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
@@ -48,7 +48,6 @@ Parameter | Description | Default
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`controlLoopInterval` | wait interval between checks | `10s`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
@@ -68,7 +67,8 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--set controlLoopInterval=1m
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
@@ -80,5 +80,5 @@ $ helm upgrade -i flagger flagger/flagger \
```
> **Tip**: You can use the default [values.yaml](values.yaml)
```

View File

@@ -5,11 +5,14 @@ metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1alpha2
version: v1alpha3
versions:
- name: v1alpha2
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
@@ -17,7 +20,21 @@ spec:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
@@ -40,7 +57,9 @@ spec:
name:
type: string
autoscalerRef:
type: object
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
@@ -57,6 +76,9 @@ spec:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
@@ -74,7 +96,7 @@ spec:
type: string
interval:
type: string
pattern: "^[0-9]+(m)"
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
@@ -91,6 +113,5 @@ spec:
format: url
timeout:
type: string
pattern: "^[0-9]+(s)"
pattern: "^[0-9]+(m|s)"
{{- end }}

View File

@@ -35,7 +35,6 @@ spec:
command:
- ./flagger
- -log-level=info
- -control-loop-interval={{ .Values.controlLoopInterval }}
- -metrics-server={{ .Values.metricsServer }}
{{- if .Values.slack.url }}
- -slack-url={{ .Values.slack.url }}

View File

@@ -2,10 +2,9 @@
image:
repository: quay.io/stefanprodan/flagger
tag: 0.2.0
tag: 0.4.0
pullPolicy: IfNotPresent
controlLoopInterval: "10s"
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
slack:

View File

@@ -37,7 +37,7 @@ func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "wait interval between rollouts")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "8080", "Port to listen on.")
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
@@ -77,7 +77,7 @@ func main() {
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
canaryInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)

BIN
docs/flagger-0.3.0.tgz Normal file

Binary file not shown.

BIN
docs/flagger-0.4.0.tgz Normal file

Binary file not shown.

View File

@@ -4,13 +4,20 @@ description: Flagger is an Istio progressive delivery Kubernetes operator
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pods health. Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events, it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)

View File

@@ -5,8 +5,8 @@
## Install
* [Install Flagger](install/installing-flagger.md)
* [Install Grafana](install/installing-grafana.md)
* [Install Flagger](install/install-flagger.md)
* [Install Grafana](install/install-grafana.md)
* [Install Istio](install/install-istio.md)
## Usage

View File

@@ -2,14 +2,14 @@
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\) and creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![flagger-canary-hpa](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha2
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
@@ -36,8 +36,10 @@ spec:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
- podinfo.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
@@ -63,14 +65,35 @@ spec:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
# key-value pairs (optional)
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
```
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
spec:
selector:
matchLabels:
app: podinfo
template:
metadata:
labels:
app: podinfo
```
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
### Canary Deployment
![flagger-canary-steps](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Gated canary promotion stages:
@@ -99,17 +122,21 @@ Gated canary promotion stages:
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* mark the canary deployment as finished
* wait for the canary deployment to be updated \(revision bump\) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
### Canary Analysis
The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
Spec:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
@@ -117,19 +144,20 @@ Spec:
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
stepWeight: 2
```
The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
controlLoopInterval * (maxWeight / stepWeight)
interval * (maxWeight / stepWeight)
```
And the time it takes for a canary to be rollback:
And the time it takes for a canary to be rolled back when the metric or webhook checks are failing:
```
controlLoopInterval * threshold
interval * threshold
```
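As a quick worked example, a hedged Go sketch that plugs in the values used in this document (interval 1m, maxWeight 50, stepWeight 2, threshold 10):

```go
package main

import (
	"fmt"
	"time"
)

// Illustrative only: applies the promotion and rollback formulas above
// to the example values from the Canary spec in these docs.
func main() {
	interval := time.Minute
	maxWeight, stepWeight, threshold := 50, 2, 10

	promotion := interval * time.Duration(maxWeight/stepWeight)
	rollback := interval * time.Duration(threshold)

	fmt.Println("minimum promotion time:", promotion) // 25m0s
	fmt.Println("rollback time:", rollback)           // 10m0s
}
```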
### HTTP Metrics
@@ -205,10 +233,12 @@ histogram_quantile(0.99,
)
```
> **Note** that the metric interval should be lower or equal to the control loop interval.
### Webhooks
The canary analysis can be extended with webhooks.
Flagger would call a URL (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.
Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.
Spec:
@@ -217,13 +247,21 @@ Spec:
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
timeout: 30s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
- name: load-tests
url: http://podinfo.test:9898/echo
timeout: 30s
metadata:
key1: "val1"
key2: "val2"
```
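The webhook URLs above are examples; any HTTP endpoint works. A minimal Go sketch (not part of Flagger) of a receiver a test runner might expose, where a 2xx response marks the check as passed and any other status as failed:

```go
package main

import (
	"io"
	"log"
	"net/http"
)

// Illustrative webhook receiver: Flagger POSTs the webhook payload and
// interprets the response status code as pass (2xx) or fail (anything else).
func main() {
	http.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		log.Printf("webhook payload: %s", body)

		if testsPass() {
			w.WriteHeader(http.StatusOK) // check passed
			return
		}
		http.Error(w, "integration tests failed", http.StatusInternalServerError)
	})
	log.Fatal(http.ListenAndServe(":9898", nil))
}

// testsPass stands in for whatever validation the webhook actually runs.
func testsPass() bool { return true }
```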
Webhook payload:
> **Note** that the sum of all webhooks timeouts should be lower than the control loop interval.
Webhook payload (HTTP POST):
```json
{

View File

@@ -1,10 +1,12 @@
# Install Flagger
Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled. If you are new to Istio you can follow this GKE guide [Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled.
If you are new to Istio you can follow this GKE guide
[Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
**Prerequisites**
* Kubernetes &gt;= 1.9
* Kubernetes &gt;= 1.11
* Istio &gt;= 1.0
* Prometheus &gt;= 2.6
@@ -21,8 +23,7 @@ Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
--set metricsServer=http://prometheus.istio-system:9090
```
Enable **Slack** notifications:
@@ -61,11 +62,12 @@ helm delete --purge flagger
The command removes all the Kubernetes components associated with the chart and deletes the release.
{% hint style="info" %}
On uninstall the Flagger CRD will not be removed. Deleting the CRD will make Kubernetes remove all the objects owned by the CRD like Istio virtual services, Kubernetes deployments and ClusterIP services.
{% endhint %}
> **Note** that on uninstall the Canary CRD will not be removed.
Deleting the CRD will make Kubernetes remove all the objects owned by Flagger like Istio virtual services,
Kubernetes deployments and ClusterIP services.
If you want to remove all the objects created by Flagger you have delete the canary CRD with kubectl:
If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:
```text
kubectl delete crd canaries.flagger.app

View File

@@ -1,12 +1,14 @@
# Install Istio
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and Lets Encrypt TLS for ingress gateway on Google Kubernetes Engine.
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
Let's Encrypt TLS for ingress gateway on Google Kubernetes Engine.
![Istio GKE diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-gcp-overview.png)
### Prerequisites
You will be creating a cluster on Googles Kubernetes Engine \(GKE\), if you dont have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
You will be creating a cluster on Google's Kubernetes Engine \(GKE\),
if you don't have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
Login into GCP, create a project and enable billing for it.
@@ -64,7 +66,9 @@ gcloud container clusters create istio \
--scopes=gke-default,compute-rw,storage-rw
```
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\) preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced after a maximum of 24 hours.
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
after a maximum of 24 hours.
Set up credentials for `kubectl`:
@@ -330,10 +334,11 @@ kubectl create secret generic cert-manager-credentials \
--namespace=istio-system
```
Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and `my-gcp-project`with your project ID\):
Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and
`my-gcp-project` with your project ID\):
```yaml
apiVersion: v1alpha2
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: letsencrypt-prod
@@ -363,7 +368,7 @@ kubectl apply -f ./letsencrypt-issuer.yaml
Create a wildcard certificate \(replace `example.com` with your domain\):
```yaml
apiVersion: v1alpha2
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: istio-gateway
@@ -403,7 +408,10 @@ Recreate Istio ingress gateway pods:
kubectl -n istio-system delete pods -l istio=ingressgateway
```
Note that Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal. Since the GKE cluster is made out of preemptible VMs the gateway pods will be replaced once every 24h, if your not using preemptible nodes then you need to manually kill the gateway pods every two months before the certificate expires.
Note that the Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal.
Since the GKE cluster is made out of preemptible VMs the gateway pods will be replaced once every 24h.
If you're not using preemptible nodes then you need to manually kill the gateway pods every two months
before the certificate expires.
### Expose services outside the service mesh

View File

@@ -12,11 +12,13 @@ helm upgrade -i flagger flagger/flagger \
--set slack.user=flagger
```
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline exceeded or if the analysis reached the maximum number of failed checks:
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis has reached the
maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)

View File

@@ -44,7 +44,8 @@ Promotion completed! podinfo.test
### Metrics
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
the destination weight values:
```bash
# Canaries total gauge
@@ -65,5 +66,4 @@ flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
####

View File

@@ -20,7 +20,7 @@ kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: v1alpha2
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
@@ -49,6 +49,8 @@ spec:
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
@@ -106,9 +108,9 @@ Flagger detects that the deployment revision changed and starts a new rollout:
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -130,6 +132,17 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-01-16T14:05:07Z
prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
@@ -160,9 +173,9 @@ When the number of failed checks reaches the canary analysis threshold, the traf
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -179,5 +192,3 @@ Events:
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
####

Binary file not shown.

View File

@@ -1,9 +1,59 @@
apiVersion: v1
entries:
flagger:
- apiVersion: v1
appVersion: 0.4.0
created: 2019-01-18T12:49:18.099861+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: fe06de1c68c6cc414440ef681cde67ae02c771de9b1e4d2d264c38a7a9c37b3d
engine: gotpl
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.11.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.4.0.tgz
version: 0.4.0
- apiVersion: v1
appVersion: 0.3.0
created: 2019-01-18T12:49:18.099501+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 8baa478cc802f4e6b7593934483359b8f70ec34413ca3b8de3a692e347a9bda4
engine: gotpl
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.3.0.tgz
version: 0.3.0
- apiVersion: v1
appVersion: 0.2.0
created: 2019-01-04T13:38:42.239798+02:00
created: 2019-01-18T12:49:18.099162+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
@@ -28,7 +78,7 @@ entries:
version: 0.2.0
- apiVersion: v1
appVersion: 0.1.2
created: 2019-01-04T13:38:42.239389+02:00
created: 2019-01-18T12:49:18.098811+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
@@ -53,7 +103,7 @@ entries:
version: 0.1.2
- apiVersion: v1
appVersion: 0.1.1
created: 2019-01-04T13:38:42.238504+02:00
created: 2019-01-18T12:49:18.098439+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
@@ -65,7 +115,7 @@ entries:
version: 0.1.1
- apiVersion: v1
appVersion: 0.1.0
created: 2019-01-04T13:38:42.237702+02:00
created: 2019-01-18T12:49:18.098153+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
@@ -78,9 +128,9 @@ entries:
grafana:
- apiVersion: v1
appVersion: 5.4.2
created: 2019-01-04T13:38:42.24034+02:00
created: 2019-01-18T12:49:18.100331+02:00
description: Grafana dashboards for monitoring Flagger canary deployments
digest: f94c0c2eaf7a7db7ef070575d280c37f93922c0e11ebdf203482c9f43603a1c9
digest: 97257d1742aca506f8703922d67863c459c1b43177870bc6050d453d19a683c0
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
maintainers:
@@ -93,4 +143,4 @@ entries:
urls:
- https://stefanprodan.github.io/flagger/grafana-0.1.0.tgz
version: 0.1.0
generated: 2019-01-04T13:38:42.236727+02:00
generated: 2019-01-18T12:49:18.097682+02:00

View File

@@ -23,6 +23,6 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
flagger:v1alpha2 \
flagger:v1alpha3 \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -16,6 +16,6 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// Package v1alpha2 is the v1alpha2 version of the API.
// Package v1alpha3 is the v1alpha3 version of the API.
// +groupName=flagger.app
package v1alpha2
package v1alpha3

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
package v1alpha3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,7 +25,7 @@ import (
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha2"}
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha3"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {

View File

@@ -14,16 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
package v1alpha3
import (
hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"time"
)
const (
CanaryKind = "Canary"
ProgressDeadlineSeconds = 600
AnalysisInterval = 60 * time.Second
)
// +genclient
@@ -44,7 +46,8 @@ type CanarySpec struct {
TargetRef hpav1.CrossVersionObjectReference `json:"targetRef"`
// reference to autoscaling resource
AutoscalerRef hpav1.CrossVersionObjectReference `json:"autoscalerRef"`
// +optional
AutoscalerRef *hpav1.CrossVersionObjectReference `json:"autoscalerRef,omitempty"`
// virtual service spec
Service CanaryService `json:"service"`
@@ -53,7 +56,7 @@ type CanarySpec struct {
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
// the maximum time in seconds for a canary deployment to make progress
// before it is considered to be failed. Defaults to 60s.
// before it is considered to be failed. Defaults to ten minutes.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
@@ -67,21 +70,30 @@ type CanaryList struct {
Items []Canary `json:"items"`
}
// CanaryState used for status state op
type CanaryState string
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string
const (
CanaryRunning CanaryState = "running"
CanaryFinished CanaryState = "finished"
CanaryFailed CanaryState = "failed"
CanaryInitialized CanaryState = "initialized"
// CanaryInitialized means the primary deployment, hpa and ClusterIP services
// have been created along with the Istio virtual service
CanaryInitialized CanaryPhase = "Initialized"
// CanaryProgressing means the canary analysis is underway
CanaryProgressing CanaryPhase = "Progressing"
// CanarySucceeded means the canary analysis has been successful
// and the canary deployment has been promoted
CanarySucceeded CanaryPhase = "Succeeded"
// CanaryFailed means the canary analysis failed
// and the canary deployment has been scaled to zero
CanaryFailed CanaryPhase = "Failed"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
State CanaryState `json:"state"`
CanaryRevision string `json:"canaryRevision"`
FailedChecks int `json:"failedChecks"`
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
// +optional
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
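With the status subresource enabled, spec and status are persisted through separate API endpoints. A rough sketch (not the controller's actual code) of a status write using the generated v1alpha3 clientset shown later in this diff:

```go
package sketch

import (
	flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setProgressing is illustrative: UpdateStatus only persists .status and
// requires Kubernetes >= 1.11 with `subresources: {status: {}}` in the CRD.
func setProgressing(c clientset.Interface, namespace, name string, weight int) error {
	cd, err := c.FlaggerV1alpha3().Canaries(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cdCopy := cd.DeepCopy()
	cdCopy.Status.Phase = flaggerv1alpha3.CanaryProgressing
	cdCopy.Status.CanaryWeight = weight
	cdCopy.Status.LastTransitionTime = metav1.Now()

	_, err = c.FlaggerV1alpha3().Canaries(namespace).UpdateStatus(cdCopy)
	return err
}
```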
@@ -96,6 +108,7 @@ type CanaryService struct {
// CanaryAnalysis is used to describe how the analysis should be done
type CanaryAnalysis struct {
Interval string `json:"interval"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
@@ -134,3 +147,17 @@ func (c *Canary) GetProgressDeadlineSeconds() int {
return ProgressDeadlineSeconds
}
// GetAnalysisInterval returns the canary analysis interval (default 60s)
func (c *Canary) GetAnalysisInterval() time.Duration {
if c.Spec.CanaryAnalysis.Interval == "" {
return AnalysisInterval
}
interval, err := time.ParseDuration(c.Spec.CanaryAnalysis.Interval)
if err != nil {
return AnalysisInterval
}
return interval
}
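A minimal sketch (not the actual scheduler) of how the per-canary interval could drive the analysis loop; `step` and `stopCh` are hypothetical placeholders:

```go
package sketch

import (
	"time"

	flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
)

// runAnalysisLoop ticks on the canary's own interval; GetAnalysisInterval
// falls back to the 60s default for empty or unparsable values.
func runAnalysisLoop(canary *flaggerv1alpha3.Canary, step func(), stopCh <-chan struct{}) {
	ticker := time.NewTicker(canary.GetAnalysisInterval())
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			step() // e.g. advance the canary weight and run the metric checks
		case <-stopCh:
			return
		}
	}
}
```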

View File

@@ -18,9 +18,10 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
v1 "k8s.io/api/autoscaling/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -159,7 +160,11 @@ func (in *CanaryService) DeepCopy() *CanaryService {
func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
*out = *in
out.TargetRef = in.TargetRef
out.AutoscalerRef = in.AutoscalerRef
if in.AutoscalerRef != nil {
in, out := &in.AutoscalerRef, &out.AutoscalerRef
*out = new(v1.CrossVersionObjectReference)
**out = **in
}
in.Service.DeepCopyInto(&out.Service)
in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
if in.ProgressDeadlineSeconds != nil {

View File

@@ -19,7 +19,7 @@ limitations under the License.
package versioned
import (
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -27,27 +27,27 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface
FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface
Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
flaggerV1alpha2 *flaggerv1alpha2.FlaggerV1alpha2Client
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
}
// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
return c.flaggerV1alpha2
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
return c.flaggerV1alpha3
}
// Deprecated: Flagger retrieves the default version of FlaggerClient.
// Please explicitly pick a version.
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
return c.flaggerV1alpha2
func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
return c.flaggerV1alpha3
}
// Discovery retrieves the DiscoveryClient
@@ -66,7 +66,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
}
var cs Clientset
var err error
cs.flaggerV1alpha2, err = flaggerv1alpha2.NewForConfig(&configShallowCopy)
cs.flaggerV1alpha3, err = flaggerv1alpha3.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
@@ -82,7 +82,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.flaggerV1alpha2 = flaggerv1alpha2.NewForConfigOrDie(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -91,7 +91,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.flaggerV1alpha2 = flaggerv1alpha2.New(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
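
A rough out-of-cluster usage sketch for the regenerated clientset (assuming a kubeconfig at the default location): list canaries in every namespace and print the same phase and weight columns that back `kubectl get canaries`:

```go
package main

import (
	"fmt"
	"log"

	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	client, err := clientset.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	canaries, err := client.FlaggerV1alpha3().Canaries(metav1.NamespaceAll).List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range canaries.Items {
		fmt.Printf("%s/%s\t%s\t%d\n", c.Namespace, c.Name, c.Status.Phase, c.Status.CanaryWeight)
	}
}
```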

View File

@@ -20,8 +20,8 @@ package fake
import (
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
fakeflaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2/fake"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
fakeflaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -71,12 +71,12 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
var _ clientset.Interface = &Clientset{}
// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}
// Flagger retrieves the FlaggerV1alpha2Client
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
// Flagger retrieves the FlaggerV1alpha3Client
func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}
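
The fake clientset pairs with the refactored scheduler tests; `NewSimpleClientset` is the standard client-gen fake constructor. A hedged sketch of how a test might seed a canary and read it back (the assertion itself is illustrative):

```go
package sketch

import (
	"testing"

	flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
	fakeflagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestCanaryPhaseDefaults(t *testing.T) {
	canary := &flaggerv1alpha3.Canary{
		ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "test"},
	}
	client := fakeflagger.NewSimpleClientset(canary)

	stored, err := client.FlaggerV1alpha3().Canaries("test").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if stored.Status.Phase != "" {
		t.Errorf("expected empty phase before initialization, got %s", stored.Status.Phase)
	}
}
```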

View File

@@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
flaggerv1alpha2.AddToScheme(scheme)
flaggerv1alpha3.AddToScheme(scheme)
}

View File

@@ -19,7 +19,7 @@ limitations under the License.
package scheme
import (
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
flaggerv1alpha2.AddToScheme(scheme)
flaggerv1alpha3.AddToScheme(scheme)
}

View File

@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
scheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
@@ -35,15 +35,15 @@ type CanariesGetter interface {
// CanaryInterface has methods to work with Canary resources.
type CanaryInterface interface {
Create(*v1alpha2.Canary) (*v1alpha2.Canary, error)
Update(*v1alpha2.Canary) (*v1alpha2.Canary, error)
UpdateStatus(*v1alpha2.Canary) (*v1alpha2.Canary, error)
Create(*v1alpha3.Canary) (*v1alpha3.Canary, error)
Update(*v1alpha3.Canary) (*v1alpha3.Canary, error)
UpdateStatus(*v1alpha3.Canary) (*v1alpha3.Canary, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha2.Canary, error)
List(opts v1.ListOptions) (*v1alpha2.CanaryList, error)
Get(name string, options v1.GetOptions) (*v1alpha3.Canary, error)
List(opts v1.ListOptions) (*v1alpha3.CanaryList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error)
CanaryExpansion
}
@@ -54,7 +54,7 @@ type canaries struct {
}
// newCanaries returns a Canaries
func newCanaries(c *FlaggerV1alpha2Client, namespace string) *canaries {
func newCanaries(c *FlaggerV1alpha3Client, namespace string) *canaries {
return &canaries{
client: c.RESTClient(),
ns: namespace,
@@ -62,8 +62,8 @@ func newCanaries(c *FlaggerV1alpha2Client, namespace string) *canaries {
}
// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Canary, err error) {
result = &v1alpha3.Canary{}
err = c.client.Get().
Namespace(c.ns).
Resource("canaries").
@@ -75,8 +75,8 @@ func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Can
}
// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
result = &v1alpha2.CanaryList{}
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
result = &v1alpha3.CanaryList{}
err = c.client.Get().
Namespace(c.ns).
Resource("canaries").
@@ -97,8 +97,8 @@ func (c *canaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}
// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
func (c *canaries) Create(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
result = &v1alpha3.Canary{}
err = c.client.Post().
Namespace(c.ns).
Resource("canaries").
@@ -109,8 +109,8 @@ func (c *canaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err
}
// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
func (c *canaries) Update(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
result = &v1alpha3.Canary{}
err = c.client.Put().
Namespace(c.ns).
Resource("canaries").
@@ -124,8 +124,8 @@ func (c *canaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *canaries) UpdateStatus(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
func (c *canaries) UpdateStatus(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
result = &v1alpha3.Canary{}
err = c.client.Put().
Namespace(c.ns).
Resource("canaries").
@@ -160,8 +160,8 @@ func (c *canaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
}
// Patch applies the patch and returns the patched canary.
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
result = &v1alpha3.Canary{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("canaries").

View File

@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha2
package v1alpha3

View File

@@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -30,29 +30,29 @@ import (
// FakeCanaries implements CanaryInterface
type FakeCanaries struct {
Fake *FakeFlaggerV1alpha2
Fake *FakeFlaggerV1alpha3
ns string
}
var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha2", Resource: "canaries"}
var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha3", Resource: "canaries"}
var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha2", Kind: "Canary"}
var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha3", Kind: "Canary"}
// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha3.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.Canary), err
return obj.(*v1alpha3.Canary), err
}
// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha2.CanaryList{})
Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha3.CanaryList{})
if obj == nil {
return nil, err
@@ -62,8 +62,8 @@ func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, e
if label == nil {
label = labels.Everything()
}
list := &v1alpha2.CanaryList{ListMeta: obj.(*v1alpha2.CanaryList).ListMeta}
for _, item := range obj.(*v1alpha2.CanaryList).Items {
list := &v1alpha3.CanaryList{ListMeta: obj.(*v1alpha3.CanaryList).ListMeta}
for _, item := range obj.(*v1alpha3.CanaryList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
@@ -79,43 +79,43 @@ func (c *FakeCanaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}
// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Create(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha3.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.Canary), err
return obj.(*v1alpha3.Canary), err
}
// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Update(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha3.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.Canary), err
return obj.(*v1alpha3.Canary), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeCanaries) UpdateStatus(canary *v1alpha2.Canary) (*v1alpha2.Canary, error) {
func (c *FakeCanaries) UpdateStatus(canary *v1alpha3.Canary) (*v1alpha3.Canary, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha2.Canary{})
Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha3.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.Canary), err
return obj.(*v1alpha3.Canary), err
}
// Delete takes name of the canary and deletes it. Returns an error if one occurs.
func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha3.Canary{})
return err
}
@@ -124,17 +124,17 @@ func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
func (c *FakeCanaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(canariesResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha2.CanaryList{})
_, err := c.Fake.Invokes(action, &v1alpha3.CanaryList{})
return err
}
// Patch applies the patch and returns the patched canary.
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha2.Canary{})
Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha3.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha2.Canary), err
return obj.(*v1alpha3.Canary), err
}


@@ -19,22 +19,22 @@ limitations under the License.
package fake
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeFlaggerV1alpha2 struct {
type FakeFlaggerV1alpha3 struct {
*testing.Fake
}
func (c *FakeFlaggerV1alpha2) Canaries(namespace string) v1alpha2.CanaryInterface {
func (c *FakeFlaggerV1alpha3) Canaries(namespace string) v1alpha3.CanaryInterface {
return &FakeCanaries{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeFlaggerV1alpha2) RESTClient() rest.Interface {
func (c *FakeFlaggerV1alpha3) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}


@@ -16,31 +16,31 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type FlaggerV1alpha2Interface interface {
type FlaggerV1alpha3Interface interface {
RESTClient() rest.Interface
CanariesGetter
}
// FlaggerV1alpha2Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha2Client struct {
// FlaggerV1alpha3Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha3Client struct {
restClient rest.Interface
}
func (c *FlaggerV1alpha2Client) Canaries(namespace string) CanaryInterface {
func (c *FlaggerV1alpha3Client) Canaries(namespace string) CanaryInterface {
return newCanaries(c, namespace)
}
// NewForConfig creates a new FlaggerV1alpha2Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha2Client, error) {
// NewForConfig creates a new FlaggerV1alpha3Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha3Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@@ -49,12 +49,12 @@ func NewForConfig(c *rest.Config) (*FlaggerV1alpha2Client, error) {
if err != nil {
return nil, err
}
return &FlaggerV1alpha2Client{client}, nil
return &FlaggerV1alpha3Client{client}, nil
}
// NewForConfigOrDie creates a new FlaggerV1alpha2Client for the given config and
// NewForConfigOrDie creates a new FlaggerV1alpha3Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha2Client {
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha3Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@@ -62,13 +62,13 @@ func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha2Client {
return client
}
// New creates a new FlaggerV1alpha2Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha2Client {
return &FlaggerV1alpha2Client{c}
// New creates a new FlaggerV1alpha3Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha3Client {
return &FlaggerV1alpha3Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha2.SchemeGroupVersion
gv := v1alpha3.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
@@ -82,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FlaggerV1alpha2Client) RESTClient() rest.Interface {
func (c *FlaggerV1alpha3Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
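A minimal usage sketch for the regenerated v1alpha3 typed client (not part of the diff; the kubeconfig path is a placeholder and the error handling is illustrative):

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"

    clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
)

func main() {
    // assumes a kubeconfig on disk; in-cluster config works the same way
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    client, err := clientset.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    // list canaries through the new FlaggerV1alpha3 group client
    canaries, err := client.FlaggerV1alpha3().Canaries("default").List(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, c := range canaries.Items {
        fmt.Printf("%s.%s phase=%s weight=%d failedChecks=%d\n",
            c.Name, c.Namespace, c.Status.Phase, c.Status.CanaryWeight, c.Status.FailedChecks)
    }
}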


@@ -16,6 +16,6 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
type CanaryExpansion interface{}


@@ -19,14 +19,14 @@ limitations under the License.
package flagger
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha2 provides access to shared informers for resources in V1alpha2.
V1alpha2() v1alpha2.Interface
// V1alpha3 provides access to shared informers for resources in V1alpha3.
V1alpha3() v1alpha3.Interface
}
type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha2 returns a new v1alpha2.Interface.
func (g *group) V1alpha2() v1alpha2.Interface {
return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions)
// V1alpha3 returns a new v1alpha3.Interface.
func (g *group) V1alpha3() v1alpha3.Interface {
return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions)
}


@@ -16,15 +16,15 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
time "time"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
versioned "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@@ -35,7 +35,7 @@ import (
// Canaries.
type CanaryInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha2.CanaryLister
Lister() v1alpha3.CanaryLister
}
type canaryInformer struct {
@@ -61,16 +61,16 @@ func NewFilteredCanaryInformer(client versioned.Interface, namespace string, res
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.FlaggerV1alpha2().Canaries(namespace).List(options)
return client.FlaggerV1alpha3().Canaries(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.FlaggerV1alpha2().Canaries(namespace).Watch(options)
return client.FlaggerV1alpha3().Canaries(namespace).Watch(options)
},
},
&flaggerv1alpha2.Canary{},
&flaggerv1alpha3.Canary{},
resyncPeriod,
indexers,
)
@@ -81,9 +81,9 @@ func (f *canaryInformer) defaultInformer(client versioned.Interface, resyncPerio
}
func (f *canaryInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&flaggerv1alpha2.Canary{}, f.defaultInformer)
return f.factory.InformerFor(&flaggerv1alpha3.Canary{}, f.defaultInformer)
}
func (f *canaryInformer) Lister() v1alpha2.CanaryLister {
return v1alpha2.NewCanaryLister(f.Informer().GetIndexer())
func (f *canaryInformer) Lister() v1alpha3.CanaryLister {
return v1alpha3.NewCanaryLister(f.Informer().GetIndexer())
}
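A hedged sketch of wiring the regenerated informer and lister together; the resync period, namespace and handler body are assumptions, the factory accessors match the generated code above:

package example

import (
    "time"

    "k8s.io/client-go/tools/cache"

    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
    informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
)

func watchCanaries(client clientset.Interface, stopCh <-chan struct{}) {
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    canaryInformer := factory.Flagger().V1alpha3().Canaries()

    canaryInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            if c, ok := obj.(*flaggerv1alpha3.Canary); ok {
                // react to newly observed canaries here
                _ = c
            }
        },
    })

    factory.Start(stopCh)
    cache.WaitForCacheSync(stopCh, canaryInformer.Informer().HasSynced)

    // the lister serves reads from the informer's local cache
    _, _ = canaryInformer.Lister().Canaries("default").Get("podinfo")
}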


@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"


@@ -21,7 +21,7 @@ package externalversions
import (
"fmt"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -52,9 +52,9 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=flagger.app, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithResource("canaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha2().Canaries().Informer()}, nil
// Group=flagger.app, Version=v1alpha3
case v1alpha3.SchemeGroupVersion.WithResource("canaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha3().Canaries().Informer()}, nil
}


@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
import (
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
@@ -28,7 +28,7 @@ import (
// CanaryLister helps list Canaries.
type CanaryLister interface {
// List lists all Canaries in the indexer.
List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
List(selector labels.Selector) (ret []*v1alpha3.Canary, err error)
// Canaries returns an object that can list and get Canaries.
Canaries(namespace string) CanaryNamespaceLister
CanaryListerExpansion
@@ -45,9 +45,9 @@ func NewCanaryLister(indexer cache.Indexer) CanaryLister {
}
// List lists all Canaries in the indexer.
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha3.Canary, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.Canary))
ret = append(ret, m.(*v1alpha3.Canary))
})
return ret, err
}
@@ -60,9 +60,9 @@ func (s *canaryLister) Canaries(namespace string) CanaryNamespaceLister {
// CanaryNamespaceLister helps list and get Canaries.
type CanaryNamespaceLister interface {
// List lists all Canaries in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
List(selector labels.Selector) (ret []*v1alpha3.Canary, err error)
// Get retrieves the Canary from the indexer for a given namespace and name.
Get(name string) (*v1alpha2.Canary, error)
Get(name string) (*v1alpha3.Canary, error)
CanaryNamespaceListerExpansion
}
@@ -74,21 +74,21 @@ type canaryNamespaceLister struct {
}
// List lists all Canaries in the indexer for a given namespace.
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Canary, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha2.Canary))
ret = append(ret, m.(*v1alpha3.Canary))
})
return ret, err
}
// Get retrieves the Canary from the indexer for a given namespace and name.
func (s canaryNamespaceLister) Get(name string) (*v1alpha2.Canary, error) {
func (s canaryNamespaceLister) Get(name string) (*v1alpha3.Canary, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha2.Resource("canary"), name)
return nil, errors.NewNotFound(v1alpha3.Resource("canary"), name)
}
return obj.(*v1alpha2.Canary), nil
return obj.(*v1alpha3.Canary), nil
}


@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
package v1alpha3
// CanaryListerExpansion allows custom methods to be added to
// CanaryLister.


@@ -7,11 +7,11 @@ import (
"github.com/google/go-cmp/cmp"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
flaggerscheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha3"
"github.com/stefanprodan/flagger/pkg/notifier"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -40,6 +40,7 @@ type Controller struct {
eventRecorder record.EventRecorder
logger *zap.SugaredLogger
canaries *sync.Map
jobs map[string]CanaryJob
deployer CanaryDeployer
router CanaryRouter
observer CanaryObserver
@@ -98,6 +99,7 @@ func NewController(
eventRecorder: eventRecorder,
logger: logger,
canaries: new(sync.Map),
jobs: map[string]CanaryJob{},
flaggerWindow: flaggerWindow,
deployer: deployer,
router: router,
@@ -246,17 +248,17 @@ func checkCustomResourceType(obj interface{}, logger *zap.SugaredLogger) (flagge
}
func (c *Controller) recordEventInfof(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Infof(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeNormal, "Synced", fmt.Sprintf(template, args...))
}
func (c *Controller) recordEventErrorf(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Errorf(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Errorf(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}
func (c *Controller) recordEventWarningf(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Infof(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}
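The recordEvent* changes above attach a canary=name.namespace key to every log entry; a standalone sketch of the same zap SugaredLogger pattern (the logger construction here is illustrative, Flagger builds its own logger in pkg/logging):

package main

import (
    "fmt"

    "go.uber.org/zap"
)

func main() {
    base, _ := zap.NewDevelopment()
    logger := base.Sugar()

    name, namespace := "podinfo", "default"
    // every controller event now logs with a canary=name.namespace key-value pair
    logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).
        Infof("Advance %s.%s canary weight %v", name, namespace, 10)
}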


@@ -9,7 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
@@ -32,15 +32,17 @@ type CanaryDeployer struct {
// Promote copies the pod spec from canary to primary
func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
@@ -49,15 +51,17 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
}
primary.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primary.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primary.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primary.Spec.Strategy = canary.Spec.Strategy
primary.Spec.Template.Spec = canary.Spec.Template.Spec
_, err = c.kubeClient.AppsV1().Deployments(primary.Namespace).Update(primary)
if err != nil {
return fmt.Errorf("updating template spec %s.%s failed: %v", primary.GetName(), primary.Namespace, err)
primaryCopy := primary.DeepCopy()
primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primaryCopy.Spec.Strategy = canary.Spec.Strategy
primaryCopy.Spec.Template.Spec = canary.Spec.Template.Spec
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
primaryCopy.GetName(), primaryCopy.Namespace, err)
}
return nil
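The switch to DeepCopy above avoids mutating the object handed back by the API client before calling Update; a minimal sketch of that pattern with a plain client-go Deployments client (names are placeholders, not Flagger code):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func scaleDeployment(kubeClient kubernetes.Interface, namespace, name string, replicas int32) error {
    dep, err := kubeClient.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return err
    }
    // mutate a deep copy, never the object returned by the client or a lister cache
    depCopy := dep.DeepCopy()
    depCopy.Spec.Replicas = &replicas
    _, err = kubeClient.AppsV1().Deployments(namespace).Update(depCopy)
    return err
}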
@@ -78,15 +82,11 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
} else {
return retriable, err
}
return retriable, fmt.Errorf("Halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error())
}
if primary.Spec.Replicas == int32p(0) {
return true, fmt.Errorf("halt %s.%s advancement primary deployment is scaled to zero",
return true, fmt.Errorf("Halt %s.%s advancement primary deployment is scaled to zero",
cd.Name, cd.Namespace)
}
return true, nil
@@ -96,18 +96,19 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
// the deployment is in the middle of a rolling update or if the pods are unhealthy
// it will return a non retriable error if the rolling update is stuck
func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return true, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return true, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
return retriable, fmt.Errorf("Halt advancement %s.%s %s", targetName, cd.Namespace, err.Error())
} else {
return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
cd.GetProgressDeadlineSeconds())
@@ -119,22 +120,23 @@ func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
// IsNewSpec returns true if the canary deployment pod spec has changed
func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return false, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
if cd.Status.CanaryRevision == "" {
if cd.Status.LastAppliedSpec == "" {
return true, nil
}
newSpec := &canary.Spec.Template.Spec
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.CanaryRevision)
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
if err != nil {
return false, err
return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
}
oldSpec := &corev1.PodSpec{}
err = json.Unmarshal(oldSpecJson, oldSpec)
@@ -150,31 +152,60 @@ func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
return false, nil
}
// SetFailedChecks updates the canary failed checks counter
func (c *CanaryDeployer) SetFailedChecks(cd *flaggerv1.Canary, val int) error {
cd.Status.FailedChecks = val
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
// ShouldAdvance determines if the canary analysis can proceed
func (c *CanaryDeployer) ShouldAdvance(cd *flaggerv1.Canary) (bool, error) {
if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
return true, nil
}
return c.IsNewSpec(cd)
}
// SetStatusFailedChecks updates the canary failed checks counter
func (c *CanaryDeployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.FailedChecks = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SetState updates the canary status state
func (c *CanaryDeployer) SetState(cd *flaggerv1.Canary, state flaggerv1.CanaryState) error {
cd.Status.State = state
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
// SetStatusWeight updates the canary status weight value
func (c *CanaryDeployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.CanaryWeight = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SetStatusPhase updates the canary status phase
func (c *CanaryDeployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = phase
cdCopy.Status.LastTransitionTime = metav1.Now()
if phase != flaggerv1.CanaryProgressing {
cdCopy.Status.CanaryWeight = 0
}
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SyncStatus encodes the canary pod spec and updates the canary status
func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
@@ -182,36 +213,42 @@ func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.Canar
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
specJson, err := json.Marshal(canary.Spec.Template.Spec)
specJson, err := json.Marshal(dep.Spec.Template.Spec)
if err != nil {
return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
specEnc := base64.StdEncoding.EncodeToString(specJson)
cd.Status.State = status.State
cd.Status.FailedChecks = status.FailedChecks
cd.Status.CanaryRevision = specEnc
cd.Status.LastTransitionTime = metav1.Now()
cd, err = c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = status.Phase
cdCopy.Status.CanaryWeight = status.CanaryWeight
cdCopy.Status.FailedChecks = status.FailedChecks
cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// Scale sets the canary deployment replicas
func (c *CanaryDeployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
canary.Spec.Replicas = int32p(replicas)
canary, err = c.kubeClient.AppsV1().Deployments(canary.Namespace).Update(canary)
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = int32p(replicas)
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", canary.GetName(), canary.Namespace, replicas, err)
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
@@ -224,14 +261,14 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
}
if cd.Status.State == "" {
c.logger.Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if cd.Status.Phase == "" {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if err := c.Scale(cd, 0); err != nil {
return err
}
}
if cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
if err := c.createPrimaryHpa(cd); err != nil {
return fmt.Errorf("creating hpa %s.%s failed: %v", primaryName, cd.Namespace, err)
}
@@ -240,13 +277,13 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
}
func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
canaryName := cd.Spec.TargetRef.Name
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(canaryName, metav1.GetOptions{})
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found, retrying", canaryName, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
}
return err
}
@@ -292,7 +329,7 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
return err
}
c.logger.Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
}
return nil
@@ -340,7 +377,7 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
}
return nil


@@ -3,7 +3,7 @@ package controller
import (
"testing"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/logging"
appsv1 "k8s.io/api/apps/v1"
@@ -14,30 +14,30 @@ import (
"k8s.io/client-go/kubernetes/fake"
)
func newTestCanary() *v1alpha2.Canary {
cd := &v1alpha2.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()},
func newTestCanary() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha2.CanarySpec{
Spec: v1alpha3.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: hpav1.CrossVersionObjectReference{
AutoscalerRef: &hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha2.CanaryService{
}, Service: v1alpha3.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha2.CanaryAnalysis{
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha2.CanaryMetric{
Metrics: []v1alpha3.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
@@ -351,12 +351,12 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
t.Fatal(err.Error())
}
err = deployer.SetFailedChecks(canary, 1)
err = deployer.SetStatusFailedChecks(canary, 1)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -387,18 +387,18 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
err = deployer.SetState(canary, v1alpha2.CanaryRunning)
err = deployer.SetStatusPhase(canary, v1alpha3.CanaryProgressing)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if res.Status.State != v1alpha2.CanaryRunning {
t.Errorf("Got %v wanted %v", res.Status.State, v1alpha2.CanaryRunning)
if res.Status.Phase != v1alpha3.CanaryProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryProgressing)
}
}
@@ -423,8 +423,8 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
t.Fatal(err.Error())
}
status := v1alpha2.CanaryStatus{
State: v1alpha2.CanaryRunning,
status := v1alpha3.CanaryStatus{
Phase: v1alpha3.CanaryProgressing,
FailedChecks: 2,
}
err = deployer.SyncStatus(canary, status)
@@ -432,13 +432,13 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if res.Status.State != status.State {
t.Errorf("Got state %v wanted %v", res.Status.State, status.State)
if res.Status.Phase != status.Phase {
t.Errorf("Got state %v wanted %v", res.Status.Phase, status.Phase)
}
if res.Status.FailedChecks != status.FailedChecks {

pkg/controller/job.go (new file)

@@ -0,0 +1,35 @@
package controller
import "time"
// CanaryJob holds the reference to a canary deployment schedule
type CanaryJob struct {
Name string
Namespace string
SkipTests bool
function func(name string, namespace string, skipTests bool)
done chan bool
ticker *time.Ticker
}
// Start runs the canary analysis on a schedule
func (j CanaryJob) Start() {
go func() {
// run the infra bootstrap on job creation
j.function(j.Name, j.Namespace, j.SkipTests)
for {
select {
case <-j.ticker.C:
j.function(j.Name, j.Namespace, j.SkipTests)
case <-j.done:
return
}
}
}()
}
// Stop closes the job channel and stops the ticker
func (j CanaryJob) Stop() {
close(j.done)
j.ticker.Stop()
}
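The job above is the standard ticker-plus-done-channel loop; a self-contained illustration of the same idea outside the controller package (not Flagger code, the interval and work function are stand-ins for GetAnalysisInterval and advanceCanary):

package main

import (
    "fmt"
    "time"
)

type job struct {
    tick func()
    done chan bool
    t    *time.Ticker
}

func (j job) start() {
    go func() {
        j.tick() // run once immediately, like CanaryJob does on creation
        for {
            select {
            case <-j.t.C:
                j.tick()
            case <-j.done:
                return
            }
        }
    }()
}

func (j job) stop() {
    close(j.done)
    j.t.Stop()
}

func main() {
    j := job{
        tick: func() { fmt.Println("analysis step") },
        done: make(chan bool),
        t:    time.NewTicker(time.Second),
    }
    j.start()
    time.Sleep(3 * time.Second)
    j.stop()
}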


@@ -5,7 +5,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
)
// CanaryRecorder records the canary analysis as Prometheus metrics
@@ -72,8 +72,8 @@ func (cr *CanaryRecorder) SetTotal(namespace string, total int) {
// SetStatus sets the last known canary analysis status
func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary) {
status := 1
switch cd.Status.State {
case flaggerv1.CanaryRunning:
switch cd.Status.Phase {
case flaggerv1.CanaryProgressing:
status = 0
case flaggerv1.CanaryFailed:
status = 2


@@ -5,7 +5,7 @@ import (
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -42,13 +42,13 @@ func (c *CanaryRouter) Sync(cd *flaggerv1.Canary) error {
}
func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
canaryName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", canaryName)
canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(canaryName, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
if errors.IsNotFound(err) {
canaryService = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: canaryName,
Name: targetName,
Namespace: cd.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -60,7 +60,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{"app": canaryName},
Selector: map[string]string{"app": targetName},
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -79,7 +79,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
}
canaryTestServiceName := fmt.Sprintf("%s-canary", cd.Spec.TargetRef.Name)
@@ -99,7 +99,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{"app": canaryName},
Selector: map[string]string{"app": targetName},
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -118,7 +118,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
}
primaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(primaryName, metav1.GetOptions{})
@@ -157,22 +157,23 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
return err
}
c.logger.Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
}
return nil
}
func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
canaryName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", canaryName)
hosts := append(cd.Spec.Service.Hosts, canaryName)
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
hosts := append(cd.Spec.Service.Hosts, targetName)
gateways := append(cd.Spec.Service.Gateways, "mesh")
virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(canaryName, metav1.GetOptions{})
virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, metav1.GetOptions{})
if errors.IsNotFound(err) {
c.logger.Debugf("VirtualService %s.%s not found", targetName, cd.Namespace)
virtualService = &istiov1alpha3.VirtualService{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Name: targetName,
Namespace: cd.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -199,7 +200,7 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
},
{
Destination: istiov1alpha3.Destination{
Host: canaryName,
Host: targetName,
Port: istiov1alpha3.PortSelector{
Number: uint32(cd.Spec.Service.Port),
},
@@ -212,11 +213,12 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
},
}
c.logger.Debugf("Creating VirtualService %s.%s", virtualService.GetName(), cd.Namespace)
_, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Create(virtualService)
if err != nil {
return fmt.Errorf("VirtualService %s.%s create error %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s create error %v", targetName, cd.Namespace, err)
}
c.logger.Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
}
return nil
@@ -228,23 +230,24 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
canary istiov1alpha3.DestinationWeight,
err error,
) {
targetName := cd.Spec.TargetRef.Name
vs := &istiov1alpha3.VirtualService{}
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
err = fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
err = fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
return
}
err = fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
err = fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
return
}
for _, http := range vs.Spec.Http {
for _, route := range http.Route {
if route.Destination.Host == fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) {
if route.Destination.Host == fmt.Sprintf("%s-primary", targetName) {
primary = route
}
if route.Destination.Host == cd.Spec.TargetRef.Name {
if route.Destination.Host == targetName {
canary = route
}
}
@@ -252,7 +255,7 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
if primary.Weight == 0 && canary.Weight == 0 {
err = fmt.Errorf("VirtualService %s.%s does not contain routes for %s and %s",
cd.Name, cd.Namespace, fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name), cd.Spec.TargetRef.Name)
targetName, cd.Namespace, fmt.Sprintf("%s-primary", targetName), targetName)
}
return
@@ -264,23 +267,26 @@ func (c *CanaryRouter) SetRoutes(
primary istiov1alpha3.DestinationWeight,
canary istiov1alpha3.DestinationWeight,
) error {
vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
return fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
}
vs.Spec.Http = []istiov1alpha3.HTTPRoute{
vsCopy := vs.DeepCopy()
vsCopy.Spec.Http = []istiov1alpha3.HTTPRoute{
{
Route: []istiov1alpha3.DestinationWeight{primary, canary},
},
}
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vs)
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vsCopy)
if err != nil {
return fmt.Errorf("VirtualService %s.%s update failed: %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s update failed: %v", targetName, cd.Namespace, err)
}
return nil
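For orientation, a hedged sketch of how GetRoutes and SetRoutes could be combined to shift one step of traffic; the helper name and step handling are assumptions, the real logic lives in advanceCanary:

package controller

import (
    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
)

// advanceOneStep moves stepWeight percent of traffic from the primary route to the
// canary route, mirroring what the scheduler does between analysis runs.
func advanceOneStep(router CanaryRouter, cd *flaggerv1.Canary, stepWeight int) error {
    primary, canary, err := router.GetRoutes(cd)
    if err != nil {
        return err
    }
    primary.Weight -= stepWeight
    canary.Weight += stepWeight
    return router.SetRoutes(cd, primary, canary)
}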


@@ -4,37 +4,77 @@ import (
"fmt"
"time"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
// scheduleCanaries synchronises the canary map with the jobs map,
// for new canaries new jobs are created and started
// for the removed canaries the jobs are stopped and deleted
func (c *Controller) scheduleCanaries() {
current := make(map[string]string)
stats := make(map[string]int)
c.canaries.Range(func(key interface{}, value interface{}) bool {
r := value.(*flaggerv1.Canary)
if r.Spec.TargetRef.Kind == "Deployment" {
go c.advanceCanary(r.Name, r.Namespace)
canary := value.(*flaggerv1.Canary)
// format: <name>.<namespace>
name := key.(string)
current[name] = fmt.Sprintf("%s.%s", canary.Spec.TargetRef.Name, canary.Namespace)
// schedule new jobs
if _, exists := c.jobs[name]; !exists {
job := CanaryJob{
Name: canary.Name,
Namespace: canary.Namespace,
function: c.advanceCanary,
done: make(chan bool),
ticker: time.NewTicker(canary.GetAnalysisInterval()),
}
c.jobs[name] = job
job.Start()
}
t, ok := stats[r.Namespace]
// compute canaries per namespace total
t, ok := stats[canary.Namespace]
if !ok {
stats[r.Namespace] = 1
stats[canary.Namespace] = 1
} else {
stats[r.Namespace] = t + 1
stats[canary.Namespace] = t + 1
}
return true
})
// cleanup deleted jobs
for job := range c.jobs {
if _, exists := current[job]; !exists {
c.jobs[job].Stop()
delete(c.jobs, job)
}
}
// check if multiple canaries have the same target
for canaryName, targetName := range current {
for name, target := range current {
if name != canaryName && target == targetName {
c.logger.With("canary", canaryName).Errorf("Bad things will happen! Found more than one canary with the same target %s", targetName)
}
}
}
// set total canaries per namespace metric
for k, v := range stats {
c.recorder.SetTotal(k, v)
}
}
func (c *Controller) advanceCanary(name string, namespace string) {
func (c *Controller) advanceCanary(name string, namespace string, skipLivenessChecks bool) {
begin := time.Now()
// check if the canary exists
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(namespace).Get(name, v1.GetOptions{})
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(namespace).Get(name, v1.GetOptions{})
if err != nil {
c.logger.Errorf("Canary %s.%s not found", name, namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).Errorf("Canary %s.%s not found", name, namespace)
return
}
@@ -50,6 +90,13 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
if ok, err := c.deployer.ShouldAdvance(cd); !ok {
if err != nil {
c.recordEventWarningf(cd, "%v", err)
}
return
}
// set max weight default value to 100%
maxWeight := 100
if cd.Spec.CanaryAnalysis.MaxWeight > 0 {
@@ -57,9 +104,11 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// check primary deployment status
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
if !skipLivenessChecks {
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
}
// check if virtual service exists
@@ -73,7 +122,33 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
// check if canary analysis should start (canary revision has changes) or continue
if ok := c.checkCanaryStatus(cd, c.deployer); !ok {
if ok := c.checkCanaryStatus(cd); !ok {
return
}
// check if canary revision changed during analysis
if restart := c.hasCanaryRevisionChanged(cd); restart {
c.recordEventInfof(cd, "New revision detected! Restarting analysis for %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace)
// route all traffic back to primary
primaryRoute.Weight = 100
canaryRoute.Weight = 0
if err := c.router.SetRoutes(cd, primaryRoute, canaryRoute); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
// reset status
status := flaggerv1.CanaryStatus{
Phase: flaggerv1.CanaryProgressing,
CanaryWeight: 0,
FailedChecks: 0,
}
if err := c.deployer.SyncStatus(cd, status); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
return
}
@@ -82,14 +157,17 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}()
// check canary deployment status
retriable, err := c.deployer.IsCanaryReady(cd)
if err != nil && retriable {
c.recordEventWarningf(cd, "%v", err)
return
var retriable = true
if !skipLivenessChecks {
retriable, err = c.deployer.IsCanaryReady(cd)
if err != nil && retriable {
c.recordEventWarningf(cd, "%v", err)
return
}
}
// check if the number of failed checks reached the threshold
if cd.Status.State == flaggerv1.CanaryRunning &&
if cd.Status.Phase == flaggerv1.CanaryProgressing &&
(!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {
if cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
@@ -125,8 +203,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// mark canary as failed
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryFailed}); err != nil {
c.logger.Errorf("%v", err)
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryFailed, CanaryWeight: 0}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return
}
@@ -137,10 +215,10 @@ func (c *Controller) advanceCanary(name string, namespace string) {
// check if the canary success rate is above the threshold
// skip check if no traffic is routed to canary
if canaryRoute.Weight == 0 {
c.recordEventInfof(cd, "Starting canary deployment for %s.%s", cd.Name, cd.Namespace)
c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
} else {
if ok := c.analyseCanary(cd); !ok {
if err := c.deployer.SetFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -164,6 +242,12 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
// update weight status
if err := c.deployer.SetStatusWeight(cd, canaryRoute.Weight); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryRoute.Weight)
@@ -195,8 +279,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
// update status
if err := c.deployer.SetState(cd, flaggerv1.CanaryFinished); err != nil {
// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -206,15 +290,15 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
}
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDeployer) bool {
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary) bool {
c.recorder.SetStatus(cd)
if cd.Status.State == "running" {
if cd.Status.Phase == flaggerv1.CanaryProgressing {
return true
}
if cd.Status.State == "" {
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryInitialized}); err != nil {
c.logger.Errorf("%v", err)
if cd.Status.Phase == "" {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryInitialized}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd)
@@ -224,16 +308,16 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
return false
}
if diff, err := deployer.IsNewSpec(cd); diff {
if diff, err := c.deployer.IsNewSpec(cd); diff {
c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "New revision detected, starting canary analysis.",
true, false)
if err = deployer.Scale(cd, 1); err != nil {
if err = c.deployer.Scale(cd, 1); err != nil {
c.recordEventErrorf(cd, "%v", err)
return false
}
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryRunning}); err != nil {
c.logger.Errorf("%v", err)
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryProgressing}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd)
@@ -242,6 +326,15 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
return false
}
func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
if cd.Status.Phase == flaggerv1.CanaryProgressing {
if diff, _ := c.deployer.IsNewSpec(cd); diff {
return true
}
}
return false
}
func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
// run metrics checks
for _, metric := range r.Spec.CanaryAnalysis.Metrics {


@@ -1,12 +1,16 @@
package controller
import (
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
"sync"
"testing"
"time"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/logging"
@@ -21,6 +25,39 @@ var (
noResyncPeriodFunc = func() time.Duration { return 0 }
)
func newTestController(
kubeClient kubernetes.Interface,
istioClient istioclientset.Interface,
flaggerClient clientset.Interface,
logger *zap.SugaredLogger,
deployer CanaryDeployer,
router CanaryRouter,
observer CanaryObserver,
) *Controller {
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
return ctrl
}
func TestScheduler_Init(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
@@ -45,29 +82,9 @@ func TestScheduler_Init(t *testing.T) {
observer := CanaryObserver{
metricsServer: "fake",
}
ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
ctrl.advanceCanary("podinfo", "default")
ctrl.advanceCanary("podinfo", "default", false)
_, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
@@ -99,30 +116,10 @@ func TestScheduler_NewRevision(t *testing.T) {
observer := CanaryObserver{
metricsServer: "fake",
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
// init
ctrl.advanceCanary("podinfo", "default")
ctrl.advanceCanary("podinfo", "default", false)
// update
dep2 := newTestDeploymentUpdated()
@@ -132,7 +129,7 @@ func TestScheduler_NewRevision(t *testing.T) {
}
// detect changes
ctrl.advanceCanary("podinfo", "default")
ctrl.advanceCanary("podinfo", "default", false)
c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
@@ -168,46 +165,195 @@ func TestScheduler_Rollback(t *testing.T) {
observer := CanaryObserver{
metricsServer: "fake",
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
// init
ctrl.advanceCanary("podinfo", "default")
ctrl.advanceCanary("podinfo", "default", true)
// update failed checks to max
err := deployer.SyncStatus(canary, v1alpha2.CanaryStatus{State: v1alpha2.CanaryRunning, FailedChecks: 11})
err := deployer.SyncStatus(canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
if err != nil {
t.Fatal(err.Error())
}
// detect changes
- ctrl.advanceCanary("podinfo", "default")
+ ctrl.advanceCanary("podinfo", "default", true)
- c, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
+ c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
- if c.Status.State != v1alpha2.CanaryFailed {
- t.Errorf("Got canary state %v wanted %v", c.Status.State, v1alpha2.CanaryFailed)
+ if c.Status.Phase != v1alpha3.CanaryFailed {
+ t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryFailed)
}
}
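The hunk above also captures the status API change from v1alpha2 to v1alpha3: the rollback test stops reading Status.State (with values such as CanaryRunning) and asserts on Status.Phase instead. For orientation only, here is a minimal sketch of the v1alpha3 status shape implied by these tests; it is reconstructed purely from the fields and phases referenced in this diff, and the string values, json tags, and anything beyond Phase and FailedChecks are assumptions rather than the actual API definition.

package v1alpha3

// CanaryPhase mirrors the phase constants referenced by the scheduler tests;
// only the phases that actually appear in this diff are listed, and the
// string values are assumed.
type CanaryPhase string

const (
	CanaryProgressing CanaryPhase = "Progressing"
	CanarySucceeded   CanaryPhase = "Succeeded"
	CanaryFailed      CanaryPhase = "Failed"
)

// CanaryStatus is a minimal sketch; the real type carries more fields.
type CanaryStatus struct {
	Phase        CanaryPhase `json:"phase"`
	FailedChecks int         `json:"failedChecks"`
}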
func TestScheduler_NewRevisionReset(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
deployer := CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
router := CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
observer := CanaryObserver{
metricsServer: "fake",
}
ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
// init
ctrl.advanceCanary("podinfo", "default", false)
// first update
dep2 := newTestDeploymentUpdated()
_, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default", true)
// advance
ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err := router.GetRoutes(canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 90 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 90)
}
if canaryRoute.Weight != 10 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 10)
}
// second update
dep2.Spec.Template.Spec.ServiceAccountName = "test"
_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err = router.GetRoutes(canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
}
if canaryRoute.Weight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
}
}
func TestScheduler_Promotion(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
deployer := CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
router := CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
observer := CanaryObserver{
metricsServer: "fake",
}
ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
// init
ctrl.advanceCanary("podinfo", "default", false)
// update
dep2 := newTestDeploymentUpdated()
_, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err := router.GetRoutes(canary)
if err != nil {
t.Fatal(err.Error())
}
primaryRoute.Weight = 60
canaryRoute.Weight = 40
err = ctrl.router.SetRoutes(canary, primaryRoute, canaryRoute)
if err != nil {
t.Fatal(err.Error())
}
// advance
ctrl.advanceCanary("podinfo", "default", true)
// promote
ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err = router.GetRoutes(canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
}
if canaryRoute.Weight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
}
primaryDep, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
primaryImage := primaryDep.Spec.Template.Spec.Containers[0].Image
canaryImage := dep2.Spec.Template.Spec.Containers[0].Image
if primaryImage != canaryImage {
t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage)
}
c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanarySucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
}
}
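Every refactored test above now builds its Controller through the shared newTestController helper instead of repeating the struct literal. The helper's definition is not part of this excerpt, so the following is only a speculative reconstruction assembled from the removed per-test setup blocks visible in the diff: the parameter types, the informer wiring, and the omitted imports (they would match the ones already used by the test file) are assumptions, not the actual source.

// Hypothetical reconstruction of the shared test constructor; parameter
// types and the fake-informer setup are guesses based on the removed
// per-test setup shown in the diff above.
func newTestController(
	kubeClient kubernetes.Interface,
	istioClient istioclientset.Interface,
	flaggerClient clientset.Interface,
	logger *zap.SugaredLogger,
	deployer CanaryDeployer,
	router CanaryRouter,
	observer CanaryObserver,
) *Controller {
	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
	flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

	ctrl := &Controller{
		kubeClient:    kubeClient,
		istioClient:   istioClient,
		flaggerClient: flaggerClient,
		flaggerLister: flaggerInformer.Lister(),
		flaggerSynced: flaggerInformer.Informer().HasSynced,
		workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
		eventRecorder: &record.FakeRecorder{},
		logger:        logger,
		canaries:      new(sync.Map),
		flaggerWindow: time.Second,
		deployer:      deployer,
		router:        router,
		observer:      observer,
		recorder:      NewCanaryRecorder(false),
	}
	// mark the informer cache as synced so advanceCanary does not block in tests
	ctrl.flaggerSynced = alwaysReady

	return ctrl
}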


@@ -6,7 +6,7 @@ import (
"encoding/json"
"errors"
"fmt"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"io/ioutil"
"net/http"
"net/url"


@@ -1,7 +1,7 @@
package controller
import (
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"net/http"
"net/http/httptest"
"testing"


@@ -1,4 +1,4 @@
package version
var VERSION = "0.2.0"
var VERSION = "0.4.0"
var REVISION = "unknown"