Compare commits

...

61 Commits
0.1.0 ... 0.2.0

Author SHA1 Message Date
stefanprodan
f90ba560b7 Release v0.2.0 2019-01-04 13:44:50 +02:00
Stefan Prodan
2a9641fd68 Merge pull request #18 from stefanprodan/webhooks
Add external checks to canary analysis
2019-01-04 13:24:49 +02:00
stefanprodan
13fffe1323 Document webhooks status codes 2019-01-03 18:46:36 +02:00
stefanprodan
083556baae Document the canary analysis timespan 2019-01-03 18:27:49 +02:00
stefanprodan
5d0939af7d Add webhook docs 2019-01-03 16:11:30 +02:00
stefanprodan
d26255070e Copyright Weaveworks 2019-01-03 14:42:21 +02:00
stefanprodan
b008abd4a7 Fix metrics server offline test 2018-12-27 12:43:43 +02:00
stefanprodan
cbf9e1011d Add tests for metrics server check 2018-12-27 12:42:12 +02:00
stefanprodan
6ec3d7a76f Format observer tests 2018-12-27 12:21:33 +02:00
stefanprodan
ab52752d57 Add observer histogram test 2018-12-27 12:16:10 +02:00
stefanprodan
df3951a7ef Add observer tests 2018-12-27 12:15:16 +02:00
stefanprodan
722d36a8cc Add webhook tests 2018-12-26 17:58:35 +02:00
stefanprodan
e86c02d600 Implement canary external check
- do a HTTP POST for each webhook registered in the canary analysis
- increment the failed checks counter if a webhook returns a non-2xx status code and log the error and the response body if exists
2018-12-26 14:41:35 +02:00
stefanprodan
53546878d5 Make service port mandatory in CRD v1alpha2 2018-12-26 13:55:34 +02:00
stefanprodan
199e3b36c6 Upgrade CRD to v1alpha2
- add required fields for deployment and hpa targets
- make service port mandatory
- add webhooks validation
2018-12-26 13:46:59 +02:00
stefanprodan
0d96bedfee Add webhooks to Canary CRD v1alpha2 2018-12-26 13:42:36 +02:00
Stefan Prodan
9753820579 GitBook: [master] 3 pages modified 2018-12-19 14:32:51 +00:00
Stefan Prodan
197f218ba4 GitBook: [master] one page modified 2018-12-19 14:10:49 +00:00
Stefan Prodan
b4b1a36aba GitBook: [master] 8 pages modified 2018-12-19 13:45:12 +00:00
stefanprodan
cfc848bfa9 Link to docs website 2018-12-19 15:42:16 +02:00
stefanprodan
fcf6f96912 Add overview diagram 2018-12-19 15:30:43 +02:00
Stefan Prodan
1504dcab74 GitBook: [master] 5 pages modified 2018-12-19 13:24:16 +00:00
Stefan Prodan
4e4bc0c4f0 GitBook: [master] 4 pages modified 2018-12-19 13:21:33 +00:00
Stefan Prodan
36ce610465 GitBook: [master] 5 pages modified 2018-12-19 12:46:06 +00:00
stefanprodan
1dc2aa147b Ignore gitbook for GitHub pages 2018-12-19 13:31:18 +02:00
Stefan Prodan
8cc7e4adbb GitBook: [master] 4 pages modified 2018-12-19 11:25:30 +00:00
Stefan Prodan
978f7256a8 GitBook: [master] 2 pages modified 2018-12-19 10:08:59 +00:00
stefanprodan
e799e63e3f Set gitbook root 2018-12-19 12:00:18 +02:00
stefanprodan
5b35854464 init gitbook 2018-12-19 11:56:42 +02:00
stefanprodan
d485498a14 Add email field to charts 2018-12-18 18:38:33 +02:00
stefanprodan
dfa974cf57 Change Grafana chart title 2018-12-18 18:37:04 +02:00
stefanprodan
ee1e2e6fd9 Upgrade Grafana to v5.4.2 2018-12-18 12:58:14 +02:00
Stefan Prodan
eeb3b1ba4d Merge pull request #15 from stefanprodan/chart
Add service account option to Helm chart
2018-12-18 12:12:05 +02:00
Stefan Prodan
b510f0ee02 Merge branch 'master' into chart 2018-12-18 11:56:06 +02:00
stefanprodan
c34737b9ce Use app.kubernetes.io labels 2018-12-18 11:53:42 +02:00
stefanprodan
e4ea4f3994 Make the service account optional 2018-12-18 11:06:53 +02:00
stefanprodan
07359192e7 Add chart prerequisites and icon 2018-12-18 10:31:47 +02:00
stefanprodan
4dd23c42a2 Add Flagger logo and icons 2018-12-18 10:31:05 +02:00
Stefan Prodan
f281021abf Add Slack notifications screen 2018-12-06 16:18:38 +07:00
Stefan Prodan
71137ba3bb Release 0.1.2 2018-12-06 14:00:12 +07:00
Stefan Prodan
6372c7dfcc Merge pull request #14 from stefanprodan/slack
Add details to Slack messages
2018-12-06 13:53:20 +07:00
Stefan Prodan
4584733f6f Change coverage threshold 2018-12-06 13:48:06 +07:00
Stefan Prodan
03408683c0 Add details to Slack messages
- attach canary analysis metadata to init/start messages
- add rollback reason to failed canary messages
2018-12-06 12:51:02 +07:00
Stefan Prodan
29137ae75b Add Alertmanager example 2018-12-06 12:49:41 +07:00
Stefan Prodan
6bf85526d0 Add Slack screens with successful and failed canaries 2018-12-06 12:49:15 +07:00
stefanprodan
9f6a30f43e Bump dev version 2018-11-28 15:08:24 +02:00
stefanprodan
11bc0390c4 Release v0.1.1 2018-11-28 14:56:34 +02:00
stefanprodan
9a29ea69d7 Change progress deadline default to 10 minutes 2018-11-28 14:53:12 +02:00
Stefan Prodan
2d8adbaca4 Merge pull request #10 from stefanprodan/deadline
Rollback canary based on the deployment progress deadline check
2018-11-28 14:48:17 +02:00
stefanprodan
f3904ea099 Use canary state constants in recorder 2018-11-27 17:34:48 +02:00
stefanprodan
1b2b13e77f Disable patch coverage 2018-11-27 17:11:57 +02:00
stefanprodan
8878f15806 Clean up isDeploymentReady 2018-11-27 17:11:35 +02:00
stefanprodan
5977ff9bae Add rollback test based on failed checks threshold 2018-11-27 17:00:13 +02:00
stefanprodan
11ef6bdf37 Add progressDeadlineSeconds to canary example 2018-11-27 16:58:21 +02:00
stefanprodan
9c342e35be Add progressDeadlineSeconds validation 2018-11-27 16:35:39 +02:00
stefanprodan
c7e7785b06 Fix canary deployer is ready test 2018-11-27 15:55:04 +02:00
stefanprodan
4cb5ceb48b Rollback canary based on the deployment progress deadline check
- determine if the canary deployment is stuck by checking if there is a minimum replicas unavailable condition and if the last update time exceeds the deadline
- set progress deadline default value to 60 seconds
2018-11-27 15:44:15 +02:00
stefanprodan
5a79402a73 Add canary status state constants 2018-11-27 15:29:06 +02:00
stefanprodan
c24b11ff8b Add ProgressDeadlineSeconds to Canary CRD 2018-11-27 12:16:20 +02:00
stefanprodan
042d3c1a5b Set ProgressDeadlineSeconds for primary deployment on init/promote 2018-11-27 12:10:14 +02:00
stefanprodan
f8821cf30b bump dev version 2018-11-27 11:56:11 +02:00
84 changed files with 2213 additions and 729 deletions

.codecov.yml (new file)

@@ -0,0 +1,8 @@
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 50
        base: auto
    patch: off
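Roughly: `target: auto` measures coverage against the base commit, `threshold: 50` tolerates a drop of up to 50 percentage points before the project status fails, and `patch: off` disables the separate patch-coverage check (the 'Disable patch coverage' commit above).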

.gitbook.yaml (new file)

@@ -0,0 +1 @@
root: ./docs/gitbook


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2018 Weaveworks. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -13,7 +13,8 @@ build:
docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile
push:
docker push stefanprodan/flagger:$(TAG)
docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
docker push quay.io/stefanprodan/flagger:$(VERSION)
fmt:
gofmt -l -s -w $(SOURCE_DIRS)


@@ -7,7 +7,9 @@
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests, load tests or any other custom
validation.
### Install
@@ -37,7 +39,7 @@ ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/ser
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-overview.png)
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Gated canary promotion stages:
@@ -73,7 +75,7 @@ You can change the canary analysis _max weight_ and the _step weight_ percentage
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha1
apiVersion: flagger.app/v1alpha2
kind: Canary
metadata:
name: podinfo
@@ -84,7 +86,10 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# hpa reference (optional)
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
@@ -97,16 +102,17 @@ spec:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
- app.iowa.weavedx.com
canaryAnalysis:
# max number of failed metric checks before rollback
threshold: 5
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
@@ -118,6 +124,14 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
```
The canary analysis uses the following PromQL queries:
@@ -163,6 +177,22 @@ histogram_quantile(0.99,
)
```
The canary analysis can be extended with webhooks.
Flagger will call the webhooks (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.
Webhook payload:
```json
{
    "name": "podinfo",
    "namespace": "test",
    "metadata": {
        "test": "all",
        "token": "16688eb5e9f289f1991c"
    }
}
```
### Automated canary analysis, promotions and rollbacks
Create a test namespace with Istio sidecar injection enabled:
@@ -364,7 +394,25 @@ helm upgrade -i flagger flagger/flagger \
Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-notifications.png)
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis
reached the maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment failed:
```yaml
- alert: canary_rollback
  expr: flagger_canary_status > 1
  for: 1m
  labels:
    severity: warning
  annotations:
    summary: "Canary failed"
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
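Since the `flagger_canary_status` gauge encodes 0 for running, 1 for successful and 2 for failed, the `flagger_canary_status > 1` expression matches only canaries whose last run failed.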
### Roadmap


@@ -1,4 +1,4 @@
apiVersion: flagger.app/v1alpha1
apiVersion: flagger.app/v1alpha2
kind: Canary
metadata:
name: podinfo
@@ -9,6 +9,9 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
@@ -32,6 +35,7 @@ spec:
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
@@ -43,3 +47,11 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
metadata:
test: "all"
token: "16688eb5e9f289f1991c"


@@ -8,6 +8,7 @@ metadata:
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0


@@ -4,11 +4,14 @@ metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1alpha1
version: v1alpha2
versions:
- name: v1alpha1
- name: v1alpha2
served: true
storage: true
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
@@ -23,7 +26,11 @@ spec:
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
@@ -32,6 +39,8 @@ spec:
name:
type: string
autoscalerRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
@@ -40,6 +49,8 @@ spec:
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
@@ -56,6 +67,7 @@ spec:
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
@@ -64,3 +76,18 @@ spec:
pattern: "^[0-9]+(m)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(s)"


@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.1.0
image: quay.io/stefanprodan/flagger:0.2.0
imagePullPolicy: Always
ports:
- name: http


@@ -1,6 +1,19 @@
apiVersion: v1
name: flagger
version: 0.1.0
appVersion: 0.1.0
version: 0.2.0
appVersion: 0.2.0
kubeVersion: ">=1.9.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://github.com/stefanprodan/flagger
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops


@@ -1,11 +1,20 @@
# Flagger
[Flagger](https://flagger.app) is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack.
## Prerequisites
* Kubernetes >= 1.9
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
Add Flagger Hel repository:
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app


@@ -1,4 +1,10 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
@@ -25,8 +31,12 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
Create the name of the service account to use
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- define "flagger.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -1,9 +1,11 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "flagger.name" . }}
name: {{ template "flagger.serviceAccountName" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}


@@ -3,15 +3,16 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
annotations:
"helm.sh/resource-policy": keep
spec:
group: flagger.app
version: v1alpha1
version: v1alpha2
versions:
- name: v1alpha1
- name: v1alpha2
served: true
storage: true
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
@@ -22,11 +23,15 @@ spec:
properties:
spec:
required:
- targetRef
- service
- canaryAnalysis
- targetRef
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
@@ -35,6 +40,8 @@ spec:
name:
type: string
autoscalerRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
@@ -43,6 +50,8 @@ spec:
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
@@ -59,6 +68,7 @@ spec:
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
@@ -67,4 +77,20 @@ spec:
pattern: "^[0-9]+(m)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(s)"
{{- end }}


@@ -3,25 +3,25 @@ kind: Deployment
metadata:
name: {{ include "flagger.fullname" . }}
labels:
app: {{ include "flagger.name" . }}
chart: {{ include "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: {{ include "flagger.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ include "flagger.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: flagger
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
containers:
- name: flagger
securityContext:


@@ -4,10 +4,10 @@ kind: ClusterRole
metadata:
name: {{ template "flagger.fullname" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ['*']
resources: ['*']
@@ -20,16 +20,16 @@ kind: ClusterRoleBinding
metadata:
name: {{ template "flagger.fullname" . }}
labels:
app: {{ template "flagger.name" . }}
chart: {{ template "flagger.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "flagger.fullname" . }}
subjects:
- name: {{ template "flagger.name" . }}
namespace: {{ .Release.Namespace | quote }}
- name: {{ template "flagger.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
kind: ServiceAccount
{{- end }}


@@ -2,7 +2,7 @@
image:
repository: quay.io/stefanprodan/flagger
tag: 0.1.0
tag: 0.2.0
pullPolicy: IfNotPresent
controlLoopInterval: "10s"
@@ -14,10 +14,18 @@ slack:
# incoming webhook https://api.slack.com/incoming-webhooks
url:
crd:
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
crd:
# crd.create: `true` if custom resource definitions should be created
create: true
nameOverride: ""
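A hypothetical install command exercising the new service-account values (key names taken from the block above; substitute your own account name):

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set serviceAccount.create=false \
  --set serviceAccount.name=flagger
```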


@@ -1,6 +1,13 @@
apiVersion: v1
name: grafana
version: 0.1.0
appVersion: 5.3.1
description: A Grafana Helm chart for monitoring progressive deployments powered by Istio and Flagger
home: https://github.com/stefanprodan/flagger
appVersion: 5.4.2
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com


@@ -1,16 +1,31 @@
# Weave Cloud Grafana
# Flagger Grafana
Grafana v5 with Kubernetes dashboards and Prometheus and Weave Cloud data sources.
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites
* Kubernetes >= 1.9
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
To install the chart with the release name `my-release`:
Add Flagger Helm repository:
```console
$ helm install stable/grafana --name my-release \
--set service.type=NodePort \
--set token=WEAVE-TOKEN \
--set password=admin
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-grafana`:
```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The command deploys Grafana on the Kubernetes cluster in the default namespace.
@@ -18,10 +33,10 @@ The [configuration](#configuration) section lists the parameters that can be con
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
To uninstall/delete the `flagger-grafana` deployment:
```console
$ helm delete --purge my-release
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
@@ -34,30 +49,29 @@ Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `5.0.1`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources` | pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `LoadBalancer`
`url` | Prometheus URL, used when Weave token is empty | `http://prometheus:9090`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `none`
`password` | Grafana admin password | `admin`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install stable/grafana --name my-release \
--set=token=WEAVE-TOKEN \
--set password=admin
helm install flagger/grafana --name flagger-grafana \
--set token=WEAVE-CLOUD-TOKEN
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install stable/grafana --name my-release -f values.yaml
helm install flagger/grafana --name flagger-grafana -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)


@@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: grafana/grafana
tag: 5.3.1
tag: 5.4.2
pullPolicy: IfNotPresent
service:


@@ -77,7 +77,7 @@ func main() {
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
canaryInformer := flaggerInformerFactory.Flagger().V1alpha1().Canaries()
canaryInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)


@@ -1,384 +1,11 @@
# flagger
# Flagger
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![codecov](https://codecov.io/gh/stefanprodan/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic
shifting and Prometheus metrics for canary analysis.
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health. Based on the KPIs analysis
a canary is promoted or aborted and the analysis result is published to Slack.
### Install
### For the install instructions and usage examples please see [docs.flagger.app](https://docs.flagger.app)
Before installing Flagger make sure you have Istio setup up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
```
Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
### Usage
Flagger takes a Kubernetes deployment and creates a series of objects
(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-overview.png)
Gated canary promotion stages:
* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated (revision bump) and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated (revision bump) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# hpa reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
canaryAnalysis:
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
```
The canary analysis is using the following promql queries:
_HTTP requests success rate percentage_
```
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload"
}[$interval]
)
)
```
_HTTP requests milliseconds duration P99_
```
histogram_quantile(0.99,
sum(
irate(
istio_request_duration_seconds_bucket{
reporter="destination",
destination_workload=~"$workload",
destination_workload_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
### Automated canary analysis, promotions and rollbacks
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):
```bash
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
![flagger-canary-steps](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Monitoring
Flagger comes with a Grafana dashboard made for canary analysis.
Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
The canary errors and latency spikes have been recorded as Kubernetes events and logged by Flagger in json format:
```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
### Alerting
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-notifications.png)
### Roadmap
* Extend the validation mechanism to support other metrics than HTTP success rate and latency
* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two
* Extend the canary analysis and promotion to other types than Kubernetes deployments such as Flux Helm releases or OpenFaaS functions
### Contributing
Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
When submitting bug reports please include as much details as possible:
* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)


@@ -51,4 +51,5 @@ plugins:
exclude:
- CNAME
- gitbook

Binary file not shown (new image, 207 KiB).

docs/flagger-0.1.1.tgz (new binary file, not shown)
docs/flagger-0.1.2.tgz (new binary file, not shown)
docs/flagger-0.2.0.tgz (new binary file, not shown)

docs/gitbook/.gitkeep (new empty file)

docs/gitbook/README.md (new file)

@@ -0,0 +1,16 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
---
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pods health. Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events, it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)

docs/gitbook/SUMMARY.md (new file)

@@ -0,0 +1,17 @@
# Table of contents
* [Introduction](README.md)
* [How it works](how-it-works.md)
## Install
* [Install Flagger](install/installing-flagger.md)
* [Install Grafana](install/installing-grafana.md)
* [Install Istio](install/install-istio.md)
## Usage
* [Canary Deployments](usage/progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)


@@ -0,0 +1,246 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\) and creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![flagger-canary-hpa](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha2
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
    - app.iowa.weavedx.com
  canaryAnalysis:
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
    - name: istio_requests_total
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 30s
    # external checks (optional)
    webhooks:
    - name: integration-tests
      url: http://podinfo.test:9898/echo
      timeout: 1m
      metadata:
        test: "all"
        token: "16688eb5e9f289f1991c"
```
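As a quick usage sketch (the manifest file name here is hypothetical), the resource above can be saved, applied and then observed through the events Flagger records on it:

```bash
kubectl apply -f ./podinfo-canary.yaml
kubectl -n test describe canary/podinfo
```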
### Canary Deployment
![flagger-canary-steps](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Gated canary promotion stages:
* scan for canary deployments
* creates the primary deployment if needed
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% \(step weight\)
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated \(revision bump\) and start over
* increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated \(revision bump\) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
### Canary Analysis
Spec:
```yaml
canaryAnalysis:
  # max number of failed metric checks before rollback
  threshold: 10
  # max traffic percentage routed to canary
  # percentage (0-100)
  maxWeight: 50
  # canary increment step
  # percentage (0-100)
  stepWeight: 5
```
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
controlLoopInterval * (maxWeight / stepWeight)
```
And the time it takes for a canary to be rolled back:
```
controlLoopInterval * threshold
```
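For example, with the `controlLoopInterval` of `1m` used in the install examples and the spec above (`maxWeight: 50`, `stepWeight: 5`, `threshold: 10`), both come out at ten minutes:

```
promotion: 1m * (50 / 5) = 10m
rollback:  1m * 10       = 10m
```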
### HTTP Metrics
The canary analysis uses the following Prometheus queries:
**HTTP requests success rate percentage**
Spec:
```yaml
canaryAnalysis:
  metrics:
  - name: istio_requests_total
    # minimum req success rate (non 5xx responses)
    # percentage (0-100)
    threshold: 99
    interval: 1m
```
Query:
```javascript
sum(
    rate(
        istio_requests_total{
            reporter="destination",
            destination_workload_namespace=~"$namespace",
            destination_workload=~"$workload",
            response_code!~"5.*"
        }[$interval]
    )
)
/
sum(
    rate(
        istio_requests_total{
            reporter="destination",
            destination_workload_namespace=~"$namespace",
            destination_workload=~"$workload"
        }[$interval]
    )
)
```
**HTTP requests milliseconds duration P99**
Spec:
```yaml
canaryAnalysis:
  metrics:
  - name: istio_request_duration_seconds_bucket
    # maximum req duration P99
    # milliseconds
    threshold: 500
    interval: 1m
```
Query:
```javascript
histogram_quantile(0.99,
    sum(
        irate(
            istio_request_duration_seconds_bucket{
                reporter="destination",
                destination_workload=~"$workload",
                destination_workload_namespace=~"$namespace"
            }[$interval]
        )
    ) by (le)
)
```
### Webhooks
The canary analysis can be extended with webhooks.
Flagger calls each webhook (HTTP POST) and determines from the response status code (HTTP 2xx) whether the canary is failing or not.
Spec:
```yaml
canaryAnalysis:
  webhooks:
  - name: integration-tests
    url: http://podinfo.test:9898/echo
    timeout: 1m
    metadata:
      test: "all"
      token: "16688eb5e9f289f1991c"
```
Webhook payload:
```json
{
    "name": "podinfo",
    "namespace": "test",
    "metadata": {
        "test": "all",
        "token": "16688eb5e9f289f1991c"
    }
}
```
Response status codes:
* 200-202 - advance canary by increasing the traffic weight
* timeout or non-2xx - halt advancement and increment failed checks
On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
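A rough way to exercise a webhook endpoint by hand is to replay the payload above with curl and inspect the status code (URL and token taken from the example spec; a 2xx print-out means the check would pass):

```bash
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"test": "all", "token": "16688eb5e9f289f1991c"}}' \
  http://podinfo.test:9898/echo
```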


@@ -0,0 +1,452 @@
# Install Istio
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and Let's Encrypt TLS for the ingress gateway on Google Kubernetes Engine.
![Istio GKE diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-gcp-overview.png)
### Prerequisites
You will be creating a cluster on Google's Kubernetes Engine \(GKE\); if you don't have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
Log in to GCP, create a project and enable billing for it.
Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.
Set the default project \(replace `PROJECT_ID` with your own project\):
```text
gcloud config set project PROJECT_ID
```
Set the default compute region and zone:
```text
gcloud config set compute/region europe-west3
gcloud config set compute/zone europe-west3-a
```
Enable the Kubernetes and Cloud DNS services for your project:
```text
gcloud services enable container.googleapis.com
gcloud services enable dns.googleapis.com
```
Install the `kubectl` command-line tool:
```text
gcloud components install kubectl
```
Install the `helm` command-line tool:
```text
brew install kubernetes-helm
```
### GKE cluster setup
Create a cluster with three nodes using the latest Kubernetes version:
```bash
k8s_version=$(gcloud container get-server-config --format=json \
| jq -r '.validNodeVersions[0]')
gcloud container clusters create istio \
--cluster-version=${k8s_version} \
--zone=europe-west3-a \
--num-nodes=3 \
--machine-type=n1-highcpu-4 \
--preemptible \
--no-enable-cloud-logging \
--disk-size=30 \
--enable-autorepair \
--scopes=gke-default,compute-rw,storage-rw
```
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\) preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced after a maximum of 24 hours.
Set up credentials for `kubectl`:
```bash
gcloud container clusters get-credentials istio -z=europe-west3-a
```
Create a cluster admin role binding:
```bash
kubectl create clusterrolebinding "cluster-admin-$(whoami)" \
--clusterrole=cluster-admin \
--user="$(gcloud config get-value core/account)"
```
Validate your setup with:
```bash
kubectl get nodes -o wide
```
### Cloud DNS setup
You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
Create a managed zone named `istio` in Cloud DNS \(replace `example.com` with your domain\):
```bash
gcloud dns managed-zones create \
--dns-name="example.com." \
--description="Istio zone" "istio"
```
Look up your zone's name servers:
```bash
gcloud dns managed-zones describe istio
```
Update your registrar's name server records with the records returned by the above command.
Wait for the name servers to change \(replace `example.com` with your domain\):
```bash
watch dig +short NS example.com
```
Create a static IP address named `istio-gateway-ip` in the same region as your GKE cluster:
```bash
gcloud compute addresses create istio-gateway-ip --region europe-west3
```
Find the static IP address:
```bash
gcloud compute addresses describe istio-gateway-ip --region europe-west3
```
Create the following DNS records \(replace `example.com` with your domain and set your Istio Gateway IP\):
```bash
DOMAIN="example.com"
GATEWAYIP="35.198.98.90"
gcloud dns record-sets transaction start --zone=istio
gcloud dns record-sets transaction add --zone=istio \
--name="${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction execute --zone istio
```
Verify that the wildcard DNS is working \(replace `example.com` with your domain\):
```bash
watch host test.example.com
```
### Install Istio with Helm
Download the latest Istio release:
```bash
curl -L https://git.io/getLatestIstio | sh -
```
Navigate to the `istio-x.x.x` directory and copy the Istio CLI to your bin:
```bash
cd istio-x.x.x/
sudo cp ./bin/istioctl /usr/local/bin/istioctl
```
Apply the Istio CRDs:
```bash
kubectl apply -f ./install/kubernetes/helm/istio/templates/crds.yaml
```
Create a service account and a cluster role binding for Tiller:
```bash
kubectl apply -f ./install/kubernetes/helm/helm-service-account.yaml
```
Deploy Tiller in the `kube-system` namespace:
```bash
helm init --service-account tiller
```
Find the GKE IP ranges:
```bash
gcloud container clusters describe istio --zone=europe-west3-a \
| grep -e clusterIpv4Cidr -e servicesIpv4Cidr
```
You'll be using the IP ranges to allow unrestricted egress traffic for services running inside the service mesh.
Configure Istio with Prometheus, Jaeger, and cert-manager:
```yaml
global:
  nodePort: false
  proxy:
    # replace with your GKE IP ranges
    includeIPRanges: "10.28.0.0/14,10.7.240.0/20"
sidecarInjectorWebhook:
  enabled: true
  enableNamespacesByDefault: false
gateways:
  enabled: true
  istio-ingressgateway:
    replicaCount: 2
    autoscaleMin: 2
    autoscaleMax: 3
    # replace with your Istio Gateway IP
    loadBalancerIP: "35.198.98.90"
    type: LoadBalancer
pilot:
  enabled: true
  replicaCount: 1
  autoscaleMin: 1
  autoscaleMax: 1
  resources:
    requests:
      cpu: 500m
      memory: 1024Mi
grafana:
  enabled: true
  security:
    enabled: true
    adminUser: admin
    # change the password
    adminPassword: admin
prometheus:
  enabled: true
servicegraph:
  enabled: true
tracing:
  enabled: true
  jaeger:
    tag: 1.7
certmanager:
  enabled: true
```
Save the above file as `my-istio.yaml` and install Istio with Helm:
```bash
helm upgrade --install istio ./install/kubernetes/helm/istio \
--namespace=istio-system \
-f ./my-istio.yaml
```
Verify that Istio workloads are running:
```text
kubectl -n istio-system get pods
```
### Configure Istio Gateway with LE TLS
![Istio Let&apos;s Encrypt diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-cert-manager-gcp.png)
Create an Istio Gateway in the istio-system namespace with HTTPS redirect:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*"
      tls:
        mode: SIMPLE
        privateKey: /etc/istio/ingressgateway-certs/tls.key
        serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
Save the above resource as istio-gateway.yaml and then apply it:
```text
kubectl apply -f ./istio-gateway.yaml
```
Create a service account with Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):
```bash
GCP_PROJECT=my-gcp-project
gcloud iam service-accounts create dns-admin \
--display-name=dns-admin \
--project=${GCP_PROJECT}
gcloud iam service-accounts keys create ./gcp-dns-admin.json \
--iam-account=dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--project=${GCP_PROJECT}
gcloud projects add-iam-policy-binding ${GCP_PROJECT} \
--member=serviceAccount:dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--role=roles/dns.admin
```
Create a Kubernetes secret with the GCP Cloud DNS admin key:
```bash
kubectl create secret generic cert-manager-credentials \
--from-file=./gcp-dns-admin.json \
--namespace=istio-system
```
Create a Let's Encrypt issuer for Cloud DNS \(replace `email@example.com` with a valid email address and `my-gcp-project` with your project ID\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
  name: letsencrypt-prod
  namespace: istio-system
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: email@example.com
    privateKeySecretRef:
      name: letsencrypt-prod
    dns01:
      providers:
      - name: cloud-dns
        clouddns:
          serviceAccountSecretRef:
            name: cert-manager-credentials
            key: gcp-dns-admin.json
          project: my-gcp-project
```
Save the above resource as letsencrypt-issuer.yaml and then apply it:
```text
kubectl apply -f ./letsencrypt-issuer.yaml
```
Create a wildcard certificate \(replace `example.com` with your domain\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: istio-gateway
  namespace: istio-system
spec:
  secretName: istio-ingressgateway-certs
  issuerRef:
    name: letsencrypt-prod
  commonName: "*.example.com"
  acme:
    config:
    - dns01:
        provider: cloud-dns
      domains:
      - "*.example.com"
      - "example.com"
```
Save the above resource as of-cert.yaml and then apply it:
```text
kubectl apply -f ./of-cert.yaml
```
In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:
```text
kubectl -n istio-system logs deployment/certmanager -f
Certificate issued successfully
Certificate istio-system/istio-gateway scheduled for renewal in 1438 hours
```
Recreate Istio ingress gateway pods:
```bash
kubectl -n istio-system delete pods -l istio=ingressgateway
```
Note that the Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal. Since the GKE cluster is made out of preemptible VMs, the gateway pods are replaced once every 24h; if you're not using preemptible nodes you need to manually kill the gateway pods every two months, before the certificate expires.
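\(Let's Encrypt certificates are valid for 90 days and cert-manager schedules renewal roughly 30 days before expiry, which matches the 1438 hours logged above and explains the two-month cadence.\)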
### Expose services outside the service mesh
In order to expose services via the Istio Gateway you have to create a Virtual Service attached to the gateway.
Create a virtual service in `istio-system` namespace for Grafana \(replace `example.com` with your domain\):
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: grafana
  namespace: istio-system
spec:
  hosts:
  - "grafana.example.com"
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  http:
  - route:
    - destination:
        host: grafana
    timeout: 30s
```
Save the above resource as grafana-virtual-service.yaml and then apply it:
```bash
kubectl apply -f ./grafana-virtual-service.yaml
```
Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.
Check that HTTP2 is enabled:
```bash
curl -I --http2 https://grafana.example.com
HTTP/2 200
content-type: text/html; charset=UTF-8
x-envoy-upstream-service-time: 3
server: envoy
```

View File

@@ -0,0 +1,73 @@
# Install Flagger
Before installing Flagger, make sure you have [Istio](https://istio.io) running with Prometheus enabled. If you are new to Istio, you can follow the GKE guide [Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
**Prerequisites**
* Kubernetes >= 1.9
* Istio >= 1.0
* Prometheus >= 2.6
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m
```
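You can check that the rollout finished (assuming the chart names the deployment `flagger`):
```bash
kubectl -n istio-system rollout status deployment/flagger
```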
Enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Install with kubectl
If you don't have Tiller you can use the `helm template` command and apply the generated YAML with kubectl:
```bash
# generate
helm template flagger/flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m > $HOME/flagger.yaml
# apply
kubectl apply -f $HOME/flagger.yaml
```
### Uninstall
To uninstall/delete the flagger release with Helm run:
```text
helm delete --purge flagger
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
{% hint style="info" %}
On uninstall the Flagger CRD is not removed. Deleting the CRD will make Kubernetes remove all the objects owned by it, such as Istio virtual services, Kubernetes deployments and ClusterIP services.
{% endhint %}
If you want to remove all the objects created by Flagger you have to delete the canary CRD with kubectl:
```text
kubectl delete crd canaries.flagger.app
```

View File

@@ -0,0 +1,48 @@
# Install Grafana
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
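To reach the dashboard without exposing it publicly, you can port-forward to the deployment (assuming the chart names it `flagger-grafana` and Grafana listens on its default port 3000) and open `http://localhost:3000`:
```bash
kubectl -n istio-system port-forward deployment/flagger-grafana 3000:3000
```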
### Install with kubectl
If you don't have Tiller you can use the `helm template` command and apply the generated YAML with kubectl:
```bash
# generate
helm template flagger/grafana \
--name flagger-grafana \
--namespace=istio-system \
--set user=admin \
--set password=admin > $HOME/flagger-grafana.yaml
# apply
kubectl apply -f $HOME/flagger-grafana.yaml
```
### Uninstall
To uninstall/delete the Grafana release with Helm run:
```text
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.

View File

@@ -0,0 +1,37 @@
# Alerting
### Slack
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment has been initialised, when a new revision has been detected, and when the canary analysis fails or succeeds.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
### Prometheus Alertmanager
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:
```yaml
- alert: canary_rollback
expr: flagger_canary_status > 1
for: 1m
labels:
severity: warning
annotations:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
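To test the alert expression before loading the rule, you can evaluate it against the same Prometheus instance (assuming the API is reachable locally, e.g. via `kubectl -n istio-system port-forward svc/prometheus 9090:9090`):
```bash
curl -sG "http://localhost:9090/api/v1/query" \
  --data-urlencode "query=flagger_canary_status > 1" | jq .
```
An empty result set means no canary is currently in a failed state.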

View File

@@ -0,0 +1,69 @@
# Monitoring
### Grafana
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
### Logging
Canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:
```text
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
### Metrics
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo",namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary",namespace="test"} 95
flagger_canary_weight{workload="podinfo",namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
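For example, while an analysis is in progress you can pull the current routing weights straight from the Prometheus API (a sketch, assuming the API has been port-forwarded to `localhost:9090`):
```bash
curl -sG "http://localhost:9090/api/v1/query" \
  --data-urlencode 'query=flagger_canary_weight{namespace="test"}' |
  jq -r '.data.result[] | "\(.metric.workload) \(.value[1])"'
```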

View File

@@ -0,0 +1,183 @@
# Canary Deployments
This guide shows you how to use Istio and Flagger to automate canary deployments.
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha2
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
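While Flagger advances the canary you can follow its progress from the custom resource:
```bash
watch kubectl -n test get canaries
```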
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

Binary file not shown.

View File

@@ -1,9 +1,71 @@
apiVersion: v1
entries:
flagger:
- apiVersion: v1
appVersion: 0.2.0
created: 2019-01-04T13:38:42.239798+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 800b5fd1a0b2854ee8412b3170c36ecda3d382f209e18b475ee1d5e3c7fa2f83
engine: gotpl
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.2.0.tgz
version: 0.2.0
- apiVersion: v1
appVersion: 0.1.2
created: 2019-01-04T13:38:42.239389+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 0029ef8dd20ebead3d84638eaa4b44d60b3e2bd953b4b7a1169963ce93a4e87c
engine: gotpl
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.1.2.tgz
version: 0.1.2
- apiVersion: v1
appVersion: 0.1.1
created: 2019-01-04T13:38:42.238504+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 2bb8f72fcf63a5ba5ecbaa2ab0d0446f438ec93fbf3a598cd7de45e64d8f9628
home: https://github.com/stefanprodan/flagger
name: flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.1.1.tgz
version: 0.1.1
- apiVersion: v1
appVersion: 0.1.0
created: 2018-11-25T20:52:59.226156+02:00
created: 2019-01-04T13:38:42.237702+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
@@ -15,14 +77,20 @@ entries:
version: 0.1.0
grafana:
- apiVersion: v1
appVersion: 5.3.1
created: 2018-11-25T20:52:59.226488+02:00
description: A Grafana Helm chart for monitoring progressive deployments powered
by Istio and Flagger
digest: 12ad252512006e91b6eb359c4e0c73e7f01f74f3c07c85bb1e66780bed6747f5
home: https://github.com/stefanprodan/flagger
appVersion: 5.4.2
created: 2019-01-04T13:38:42.24034+02:00
description: Grafana dashboards for monitoring Flagger canary deployments
digest: f94c0c2eaf7a7db7ef070575d280c37f93922c0e11ebdf203482c9f43603a1c9
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: grafana
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/grafana-0.1.0.tgz
version: 0.1.0
generated: 2018-11-25T20:52:59.225755+02:00
generated: 2019-01-04T13:38:42.236727+02:00

BIN
docs/logo/flagger-horiz.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

BIN
docs/logo/flagger-horiz.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

BIN
docs/logo/flagger-icon.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

BIN
docs/logo/flagger-icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

BIN
docs/logo/flagger-vert.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

BIN
docs/logo/flagger-vert.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

View File

@@ -1,20 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="512px" height="534px" viewBox="0 0 512 534" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 51.2 (57519) - http://www.bohemiancoding.com/sketch -->
<title>istio-refresh</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="Logo" transform="translate(-82.000000, -67.000000)" fill-rule="nonzero">
<g id="istio-refresh">
<polygon id="hull" fill="#466BB0" points="467.486761 456 322.182535 499.591268 235 456"></polygon>
<polygon id="mainsail" fill="#466BB0" points="322.182535 426.834648 322.182535 267 235 441.365071"></polygon>
<polygon id="headsail" fill="#466BB0" points="466.773803 441.608451 336 151 336 427.078028"></polygon>
<g id="arrows" transform="translate(338.000000, 338.000000) rotate(-24.000000) translate(-338.000000, -338.000000) translate(82.000000, 82.000000)">
<path d="M256.000042,0 C114.772094,0 0.00128552135,114.771366 0.00128552135,256 C-0.0639978677,260.61614 2.36127785,264.909841 6.34842716,267.236963 C10.3355765,269.564084 15.2668702,269.564084 19.2540195,267.236963 C23.2411688,264.909841 25.6664445,260.61614 25.6011612,256 C25.6011612,128.607834 128.608494,25.6 256.000042,25.6 C335.938457,25.6 406.118967,66.3034496 447.449109,128 L413.301505,128 C408.685388,127.934716 404.391707,130.360004 402.064597,134.347172 C399.737487,138.334341 399.737487,143.265659 402.064597,147.252828 C404.391707,151.239996 408.685388,153.665284 413.301505,153.6 L470.549003,153.6 L486.398923,153.6 L486.398923,70.7176471 C486.446301,67.257803 485.091067,63.9261384 482.642017,61.4817859 C480.192966,59.0374333 476.858715,57.6886086 473.398986,57.7426467 C466.339613,57.8529965 460.702373,63.6580397 460.799047,70.7176471 L460.799047,102.525005 C414.068394,40.3001766 339.69529,0 256.000042,0 Z" id="Shape" fill="#35A2EE"></path>
<path d="M498.998861,243.024998 C491.939488,243.135348 486.302248,248.940392 486.398923,256 C486.398923,383.392166 383.391589,486.4 256.000042,486.4 C176.054933,486.4 105.870698,445.69303 64.5509751,384 L92.9338727,384.024998 C97.54999,384.090282 101.843671,381.664995 104.170781,377.677826 C106.497891,373.690657 106.497891,368.75934 104.170781,364.772171 C101.843671,360.785002 97.54999,358.359715 92.9338727,358.424998 L44.1010713,358.4 C42.4667939,358.077951 40.7853733,358.077951 39.151096,358.4 L25.6011612,358.4 L25.6011612,432.243137 C25.5358778,436.859277 27.9611535,441.152979 31.9483028,443.4801 C35.9354521,445.807221 40.8667458,445.807221 44.8538951,443.4801 C48.8410445,441.152979 51.2663202,436.859277 51.2010368,432.243137 L51.2010368,409.424998 C97.9168162,471.668595 172.282881,512 256.000042,512 C397.227989,512 511.998798,397.228634 511.998798,256 C512.046177,252.540156 510.690943,249.208491 508.241893,246.764138 C505.792842,244.319785 502.458591,242.97096 498.998861,243.024998 Z" id="Path" fill="#1DCBA2"></path>
</g>
</g>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 3.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 114 KiB

View File

@@ -23,6 +23,6 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
flagger:v1alpha1 \
flagger:v1alpha2 \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -16,6 +16,6 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// Package v1alpha1 is the v1alpha1 version of the API.
// Package v1alpha2 is the v1alpha2 version of the API.
// +groupName=flagger.app
package v1alpha1
package v1alpha2

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,7 +25,7 @@ import (
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha1"}
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha2"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {

View File

@@ -14,14 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
package v1alpha2
import (
hpav1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const CanaryKind = "Canary"
const (
CanaryKind = "Canary"
ProgressDeadlineSeconds = 600
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,6 +51,10 @@ type CanarySpec struct {
// metrics and thresholds
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
// the maximum time in seconds for a canary deployment to make progress
// before it is considered to be failed. Defaults to 600s.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -60,11 +67,21 @@ type CanaryList struct {
Items []Canary `json:"items"`
}
// CanaryState used for status state op
type CanaryState string
const (
CanaryRunning CanaryState = "running"
CanaryFinished CanaryState = "finished"
CanaryFailed CanaryState = "failed"
CanaryInitialized CanaryState = "initialized"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
State string `json:"state"`
CanaryRevision string `json:"canaryRevision"`
FailedChecks int `json:"failedChecks"`
State CanaryState `json:"state"`
CanaryRevision string `json:"canaryRevision"`
FailedChecks int `json:"failedChecks"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
@@ -79,15 +96,41 @@ type CanaryService struct {
// CanaryAnalysis is used to describe how the analysis should be done
type CanaryAnalysis struct {
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Threshold int `json:"threshold"`
MaxWeight int `json:"maxWeight"`
StepWeight int `json:"stepWeight"`
Metrics []CanaryMetric `json:"metrics"`
Webhooks []CanaryWebhook `json:"webhooks,omitempty"`
}
// CanaryMetric hold the reference to Istio metrics used for canary analysis
// CanaryMetric holds the reference to Istio metrics used for canary analysis
type CanaryMetric struct {
Name string `json:"name"`
Interval string `json:"interval"`
Threshold int `json:"threshold"`
}
// CanaryWebhook holds the reference to external checks used for canary analysis
type CanaryWebhook struct {
Name string `json:"name"`
URL string `json:"url"`
Timeout string `json:"timeout"`
// +optional
Metadata *map[string]string `json:"metadata,omitempty"`
}
// CanaryWebhookPayload holds the deployment info and metadata sent to webhooks
type CanaryWebhookPayload struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
Metadata *map[string]string `json:"metadata,omitempty"`
}
// GetProgressDeadlineSeconds returns the progress deadline (default 600s)
func (c *Canary) GetProgressDeadlineSeconds() int {
if c.Spec.ProgressDeadlineSeconds != nil {
return int(*c.Spec.ProgressDeadlineSeconds)
}
return ProgressDeadlineSeconds
}

View File

@@ -18,7 +18,7 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -60,6 +60,13 @@ func (in *CanaryAnalysis) DeepCopyInto(out *CanaryAnalysis) {
*out = make([]CanaryMetric, len(*in))
copy(*out, *in)
}
if in.Webhooks != nil {
in, out := &in.Webhooks, &out.Webhooks
*out = make([]CanaryWebhook, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@@ -155,6 +162,11 @@ func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
out.AutoscalerRef = in.AutoscalerRef
in.Service.DeepCopyInto(&out.Service)
in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
*out = new(int32)
**out = **in
}
return
}
@@ -184,3 +196,57 @@ func (in *CanaryStatus) DeepCopy() *CanaryStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryWebhook) DeepCopyInto(out *CanaryWebhook) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryWebhook.
func (in *CanaryWebhook) DeepCopy() *CanaryWebhook {
if in == nil {
return nil
}
out := new(CanaryWebhook)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryWebhookPayload) DeepCopyInto(out *CanaryWebhookPayload) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryWebhookPayload.
func (in *CanaryWebhookPayload) DeepCopy() *CanaryWebhookPayload {
if in == nil {
return nil
}
out := new(CanaryWebhookPayload)
in.DeepCopyInto(out)
return out
}

View File

@@ -19,7 +19,7 @@ limitations under the License.
package versioned
import (
flaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha1"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -27,27 +27,27 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
FlaggerV1alpha1() flaggerv1alpha1.FlaggerV1alpha1Interface
FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface
// Deprecated: please explicitly pick a version if possible.
Flagger() flaggerv1alpha1.FlaggerV1alpha1Interface
Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
flaggerV1alpha1 *flaggerv1alpha1.FlaggerV1alpha1Client
flaggerV1alpha2 *flaggerv1alpha2.FlaggerV1alpha2Client
}
// FlaggerV1alpha1 retrieves the FlaggerV1alpha1Client
func (c *Clientset) FlaggerV1alpha1() flaggerv1alpha1.FlaggerV1alpha1Interface {
return c.flaggerV1alpha1
// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
return c.flaggerV1alpha2
}
// Deprecated: Flagger retrieves the default version of FlaggerClient.
// Please explicitly pick a version.
func (c *Clientset) Flagger() flaggerv1alpha1.FlaggerV1alpha1Interface {
return c.flaggerV1alpha1
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
return c.flaggerV1alpha2
}
// Discovery retrieves the DiscoveryClient
@@ -66,7 +66,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
}
var cs Clientset
var err error
cs.flaggerV1alpha1, err = flaggerv1alpha1.NewForConfig(&configShallowCopy)
cs.flaggerV1alpha2, err = flaggerv1alpha2.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
@@ -82,7 +82,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.flaggerV1alpha1 = flaggerv1alpha1.NewForConfigOrDie(c)
cs.flaggerV1alpha2 = flaggerv1alpha2.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -91,7 +91,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.flaggerV1alpha1 = flaggerv1alpha1.New(c)
cs.flaggerV1alpha2 = flaggerv1alpha2.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs

View File

@@ -20,8 +20,8 @@ package fake
import (
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
flaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha1"
fakeflaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha1/fake"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
fakeflaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -71,12 +71,12 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
var _ clientset.Interface = &Clientset{}
// FlaggerV1alpha1 retrieves the FlaggerV1alpha1Client
func (c *Clientset) FlaggerV1alpha1() flaggerv1alpha1.FlaggerV1alpha1Interface {
return &fakeflaggerv1alpha1.FakeFlaggerV1alpha1{Fake: &c.Fake}
// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
}
// Flagger retrieves the FlaggerV1alpha1Client
func (c *Clientset) Flagger() flaggerv1alpha1.FlaggerV1alpha1Interface {
return &fakeflaggerv1alpha1.FakeFlaggerV1alpha1{Fake: &c.Fake}
// Flagger retrieves the FlaggerV1alpha2Client
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
}

View File

@@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
flaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
flaggerv1alpha1.AddToScheme(scheme)
flaggerv1alpha2.AddToScheme(scheme)
}

View File

@@ -19,7 +19,7 @@ limitations under the License.
package scheme
import (
flaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
flaggerv1alpha1.AddToScheme(scheme)
flaggerv1alpha2.AddToScheme(scheme)
}

View File

@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
scheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
@@ -35,15 +35,15 @@ type CanariesGetter interface {
// CanaryInterface has methods to work with Canary resources.
type CanaryInterface interface {
Create(*v1alpha1.Canary) (*v1alpha1.Canary, error)
Update(*v1alpha1.Canary) (*v1alpha1.Canary, error)
UpdateStatus(*v1alpha1.Canary) (*v1alpha1.Canary, error)
Create(*v1alpha2.Canary) (*v1alpha2.Canary, error)
Update(*v1alpha2.Canary) (*v1alpha2.Canary, error)
UpdateStatus(*v1alpha2.Canary) (*v1alpha2.Canary, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.Canary, error)
List(opts v1.ListOptions) (*v1alpha1.CanaryList, error)
Get(name string, options v1.GetOptions) (*v1alpha2.Canary, error)
List(opts v1.ListOptions) (*v1alpha2.CanaryList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Canary, err error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error)
CanaryExpansion
}
@@ -54,7 +54,7 @@ type canaries struct {
}
// newCanaries returns a Canaries
func newCanaries(c *FlaggerV1alpha1Client, namespace string) *canaries {
func newCanaries(c *FlaggerV1alpha2Client, namespace string) *canaries {
return &canaries{
client: c.RESTClient(),
ns: namespace,
@@ -62,8 +62,8 @@ func newCanaries(c *FlaggerV1alpha1Client, namespace string) *canaries {
}
// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha1.Canary, err error) {
result = &v1alpha1.Canary{}
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
err = c.client.Get().
Namespace(c.ns).
Resource("canaries").
@@ -75,8 +75,8 @@ func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha1.Can
}
// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha1.CanaryList, err error) {
result = &v1alpha1.CanaryList{}
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
result = &v1alpha2.CanaryList{}
err = c.client.Get().
Namespace(c.ns).
Resource("canaries").
@@ -97,8 +97,8 @@ func (c *canaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}
// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Create(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err error) {
result = &v1alpha1.Canary{}
func (c *canaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
err = c.client.Post().
Namespace(c.ns).
Resource("canaries").
@@ -109,8 +109,8 @@ func (c *canaries) Create(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err
}
// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Update(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err error) {
result = &v1alpha1.Canary{}
func (c *canaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
err = c.client.Put().
Namespace(c.ns).
Resource("canaries").
@@ -124,8 +124,8 @@ func (c *canaries) Update(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *canaries) UpdateStatus(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err error) {
result = &v1alpha1.Canary{}
func (c *canaries) UpdateStatus(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
err = c.client.Put().
Namespace(c.ns).
Resource("canaries").
@@ -160,8 +160,8 @@ func (c *canaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
}
// Patch applies the patch and returns the patched canary.
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Canary, err error) {
result = &v1alpha1.Canary{}
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
result = &v1alpha2.Canary{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("canaries").

View File

@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1
package v1alpha2

View File

@@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -30,29 +30,29 @@ import (
// FakeCanaries implements CanaryInterface
type FakeCanaries struct {
Fake *FakeFlaggerV1alpha1
Fake *FakeFlaggerV1alpha2
ns string
}
var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha1", Resource: "canaries"}
var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha2", Resource: "canaries"}
var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha1", Kind: "Canary"}
var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha2", Kind: "Canary"}
// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha1.Canary, err error) {
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha1.Canary{})
Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Canary), err
return obj.(*v1alpha2.Canary), err
}
// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha1.CanaryList, err error) {
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha1.CanaryList{})
Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha2.CanaryList{})
if obj == nil {
return nil, err
@@ -62,8 +62,8 @@ func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha1.CanaryList, e
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.CanaryList{ListMeta: obj.(*v1alpha1.CanaryList).ListMeta}
for _, item := range obj.(*v1alpha1.CanaryList).Items {
list := &v1alpha2.CanaryList{ListMeta: obj.(*v1alpha2.CanaryList).ListMeta}
for _, item := range obj.(*v1alpha2.CanaryList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
@@ -79,43 +79,43 @@ func (c *FakeCanaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}
// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Create(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err error) {
func (c *FakeCanaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha1.Canary{})
Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Canary), err
return obj.(*v1alpha2.Canary), err
}
// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Update(canary *v1alpha1.Canary) (result *v1alpha1.Canary, err error) {
func (c *FakeCanaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha1.Canary{})
Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Canary), err
return obj.(*v1alpha2.Canary), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeCanaries) UpdateStatus(canary *v1alpha1.Canary) (*v1alpha1.Canary, error) {
func (c *FakeCanaries) UpdateStatus(canary *v1alpha2.Canary) (*v1alpha2.Canary, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha1.Canary{})
Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha2.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Canary), err
return obj.(*v1alpha2.Canary), err
}
// Delete takes name of the canary and deletes it. Returns an error if one occurs.
func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha1.Canary{})
Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
return err
}
@@ -124,17 +124,17 @@ func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
func (c *FakeCanaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(canariesResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha1.CanaryList{})
_, err := c.Fake.Invokes(action, &v1alpha2.CanaryList{})
return err
}
// Patch applies the patch and returns the patched canary.
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Canary, err error) {
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha1.Canary{})
Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha2.Canary{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Canary), err
return obj.(*v1alpha2.Canary), err
}

View File

@@ -19,22 +19,22 @@ limitations under the License.
package fake
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeFlaggerV1alpha1 struct {
type FakeFlaggerV1alpha2 struct {
*testing.Fake
}
func (c *FakeFlaggerV1alpha1) Canaries(namespace string) v1alpha1.CanaryInterface {
func (c *FakeFlaggerV1alpha2) Canaries(namespace string) v1alpha2.CanaryInterface {
return &FakeCanaries{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeFlaggerV1alpha1) RESTClient() rest.Interface {
func (c *FakeFlaggerV1alpha2) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -16,31 +16,31 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type FlaggerV1alpha1Interface interface {
type FlaggerV1alpha2Interface interface {
RESTClient() rest.Interface
CanariesGetter
}
// FlaggerV1alpha1Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha1Client struct {
// FlaggerV1alpha2Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha2Client struct {
restClient rest.Interface
}
func (c *FlaggerV1alpha1Client) Canaries(namespace string) CanaryInterface {
func (c *FlaggerV1alpha2Client) Canaries(namespace string) CanaryInterface {
return newCanaries(c, namespace)
}
// NewForConfig creates a new FlaggerV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha1Client, error) {
// NewForConfig creates a new FlaggerV1alpha2Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha2Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@@ -49,12 +49,12 @@ func NewForConfig(c *rest.Config) (*FlaggerV1alpha1Client, error) {
if err != nil {
return nil, err
}
return &FlaggerV1alpha1Client{client}, nil
return &FlaggerV1alpha2Client{client}, nil
}
// NewForConfigOrDie creates a new FlaggerV1alpha1Client for the given config and
// NewForConfigOrDie creates a new FlaggerV1alpha2Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha1Client {
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha2Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@@ -62,13 +62,13 @@ func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha1Client {
return client
}
// New creates a new FlaggerV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha1Client {
return &FlaggerV1alpha1Client{c}
// New creates a new FlaggerV1alpha2Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha2Client {
return &FlaggerV1alpha2Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
gv := v1alpha2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
@@ -82,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FlaggerV1alpha1Client) RESTClient() rest.Interface {
func (c *FlaggerV1alpha2Client) RESTClient() rest.Interface {
if c == nil {
return nil
}

View File

@@ -16,6 +16,6 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
type CanaryExpansion interface{}

View File

@@ -19,14 +19,14 @@ limitations under the License.
package flagger
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
// V1alpha2 provides access to shared informers for resources in V1alpha2.
V1alpha2() v1alpha2.Interface
}
type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
// V1alpha2 returns a new v1alpha2.Interface.
func (g *group) V1alpha2() v1alpha2.Interface {
return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@@ -16,15 +16,15 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
time "time"
flaggerv1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
versioned "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@@ -35,7 +35,7 @@ import (
// Canaries.
type CanaryInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.CanaryLister
Lister() v1alpha2.CanaryLister
}
type canaryInformer struct {
@@ -61,16 +61,16 @@ func NewFilteredCanaryInformer(client versioned.Interface, namespace string, res
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.FlaggerV1alpha1().Canaries(namespace).List(options)
return client.FlaggerV1alpha2().Canaries(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.FlaggerV1alpha1().Canaries(namespace).Watch(options)
return client.FlaggerV1alpha2().Canaries(namespace).Watch(options)
},
},
&flaggerv1alpha1.Canary{},
&flaggerv1alpha2.Canary{},
resyncPeriod,
indexers,
)
@@ -81,9 +81,9 @@ func (f *canaryInformer) defaultInformer(client versioned.Interface, resyncPerio
}
func (f *canaryInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&flaggerv1alpha1.Canary{}, f.defaultInformer)
return f.factory.InformerFor(&flaggerv1alpha2.Canary{}, f.defaultInformer)
}
func (f *canaryInformer) Lister() v1alpha1.CanaryLister {
return v1alpha1.NewCanaryLister(f.Informer().GetIndexer())
func (f *canaryInformer) Lister() v1alpha2.CanaryLister {
return v1alpha2.NewCanaryLister(f.Informer().GetIndexer())
}

View File

@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"

View File

@@ -21,7 +21,7 @@ package externalversions
import (
"fmt"
v1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -52,9 +52,9 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=flagger.app, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("canaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha1().Canaries().Informer()}, nil
// Group=flagger.app, Version=v1alpha2
case v1alpha2.SchemeGroupVersion.WithResource("canaries"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha2().Canaries().Informer()}, nil
}

View File

@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
import (
v1alpha1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
@@ -28,7 +28,7 @@ import (
// CanaryLister helps list Canaries.
type CanaryLister interface {
// List lists all Canaries in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.Canary, err error)
List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
// Canaries returns an object that can list and get Canaries.
Canaries(namespace string) CanaryNamespaceLister
CanaryListerExpansion
@@ -45,9 +45,9 @@ func NewCanaryLister(indexer cache.Indexer) CanaryLister {
}
// List lists all Canaries in the indexer.
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha1.Canary, err error) {
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.Canary))
ret = append(ret, m.(*v1alpha2.Canary))
})
return ret, err
}
@@ -60,9 +60,9 @@ func (s *canaryLister) Canaries(namespace string) CanaryNamespaceLister {
// CanaryNamespaceLister helps list and get Canaries.
type CanaryNamespaceLister interface {
// List lists all Canaries in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.Canary, err error)
List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
// Get retrieves the Canary from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.Canary, error)
Get(name string) (*v1alpha2.Canary, error)
CanaryNamespaceListerExpansion
}
@@ -74,21 +74,21 @@ type canaryNamespaceLister struct {
}
// List lists all Canaries in the indexer for a given namespace.
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Canary, err error) {
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.Canary))
ret = append(ret, m.(*v1alpha2.Canary))
})
return ret, err
}
// Get retrieves the Canary from the indexer for a given namespace and name.
func (s canaryNamespaceLister) Get(name string) (*v1alpha1.Canary, error) {
func (s canaryNamespaceLister) Get(name string) (*v1alpha2.Canary, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("canary"), name)
return nil, errors.NewNotFound(v1alpha2.Resource("canary"), name)
}
return obj.(*v1alpha1.Canary), nil
return obj.(*v1alpha2.Canary), nil
}

View File

@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
package v1alpha2
// CanaryListerExpansion allows custom methods to be added to
// CanaryLister.

View File

@@ -7,11 +7,11 @@ import (
"github.com/google/go-cmp/cmp"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
flaggerscheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha1"
flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha1"
flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
"github.com/stefanprodan/flagger/pkg/notifier"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
@@ -260,12 +260,36 @@ func (c *Controller) recordEventWarningf(r *flaggerv1.Canary, template string, a
c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}
func (c *Controller) sendNotification(workload string, namespace string, message string, warn bool) {
func (c *Controller) sendNotification(cd *flaggerv1.Canary, message string, metadata bool, warn bool) {
if c.notifier == nil {
return
}
err := c.notifier.Post(workload, namespace, message, warn)
var fields []notifier.SlackField
if metadata {
fields = append(fields,
notifier.SlackField{
Title: "Target",
Value: fmt.Sprintf("%s/%s.%s", cd.Spec.TargetRef.Kind, cd.Spec.TargetRef.Name, cd.Namespace),
},
notifier.SlackField{
Title: "Traffic routing",
Value: fmt.Sprintf("Weight step: %v max: %v",
cd.Spec.CanaryAnalysis.StepWeight,
cd.Spec.CanaryAnalysis.MaxWeight),
},
notifier.SlackField{
Title: "Failed checks threshold",
Value: fmt.Sprintf("%v", cd.Spec.CanaryAnalysis.Threshold),
},
notifier.SlackField{
Title: "Progress deadline",
Value: fmt.Sprintf("%vs", cd.GetProgressDeadlineSeconds()),
},
)
}
err := c.notifier.Post(cd.Name, cd.Namespace, message, fields, warn)
if err != nil {
c.logger.Error(err)
}
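
For orientation, a minimal sketch of posting through the updated notifier API that this change targets (the new Post signature appears in the slack.go hunk near the end of this diff); the hook URL, channel and field values are placeholders mirroring the podinfo fixture.

```go
package main

import (
	"log"

	"github.com/stefanprodan/flagger/pkg/notifier"
)

func main() {
	// Placeholder webhook URL; NewSlack validates it before returning.
	slack, err := notifier.NewSlack("https://hooks.slack.com/services/T0/B0/XXX", "flagger", "#general")
	if err != nil {
		log.Fatal(err)
	}

	// When metadata is requested, the controller derives these fields from
	// the Canary spec; the literals below mirror the podinfo example.
	fields := []notifier.SlackField{
		{Title: "Target", Value: "Deployment/podinfo.default"},
		{Title: "Traffic routing", Value: "Weight step: 10 max: 50"},
		{Title: "Failed checks threshold", Value: "10"},
		{Title: "Progress deadline", Value: "60s"},
	}
	if err := slack.Post("podinfo", "default", "New revision detected, starting canary analysis.", fields, false); err != nil {
		log.Println(err)
	}
}
```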


@@ -4,11 +4,12 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
@@ -48,6 +49,7 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
}
primary.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primary.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primary.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primary.Spec.Strategy = canary.Spec.Strategy
@@ -61,37 +63,58 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
return nil
}
// IsReady checks the primary and canary deployment status and returns an error if
// the deployments are in the middle of a rolling update or if the pods are unhealthy
func (c *CanaryDeployer) IsReady(cd *flaggerv1.Canary) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
if msg, healthy := c.getDeploymentStatus(canary); !healthy {
return fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, msg)
}
// IsPrimaryReady checks the primary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy
// it will return a non-retriable error if the rolling update is stuck
func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
return true, fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
}
if msg, healthy := c.getDeploymentStatus(primary); !healthy {
return fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, msg)
retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
} else {
return retriable, err
}
}
if primary.Spec.Replicas != nil && *primary.Spec.Replicas == 0 {
return fmt.Errorf("halt %s.%s advancement %s",
cd.Name, cd.Namespace, "primary deployment is scaled to zero")
return true, fmt.Errorf("halt %s.%s advancement primary deployment is scaled to zero",
cd.Name, cd.Namespace)
}
return nil
return true, nil
}
// IsCanaryReady checks the canary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy
// it will return a non-retriable error if the rolling update is stuck
func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
}
return true, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
} else {
return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
cd.GetProgressDeadlineSeconds())
}
}
return true, nil
}
// IsNewSpec returns true if the canary deployment pod spec has changed
@@ -131,7 +154,7 @@ func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
func (c *CanaryDeployer) SetFailedChecks(cd *flaggerv1.Canary, val int) error {
cd.Status.FailedChecks = val
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha1().Canaries(cd.Namespace).Update(cd)
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
@@ -139,10 +162,10 @@ func (c *CanaryDeployer) SetFailedChecks(cd *flaggerv1.Canary, val int) error {
}
// SetState updates the canary status state
func (c *CanaryDeployer) SetState(cd *flaggerv1.Canary, state string) error {
func (c *CanaryDeployer) SetState(cd *flaggerv1.Canary, state flaggerv1.CanaryState) error {
cd.Status.State = state
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha1().Canaries(cd.Namespace).Update(cd)
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
@@ -169,7 +192,7 @@ func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.Canar
cd.Status.FailedChecks = status.FailedChecks
cd.Status.CanaryRevision = specEnc
cd.Status.LastTransitionTime = metav1.Now()
cd, err = c.flaggerClient.FlaggerV1alpha1().Canaries(cd.Namespace).Update(cd)
cd, err = c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
@@ -244,10 +267,11 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
},
},
Spec: appsv1.DeploymentSpec{
MinReadySeconds: canaryDep.Spec.MinReadySeconds,
RevisionHistoryLimit: canaryDep.Spec.RevisionHistoryLimit,
Replicas: canaryDep.Spec.Replicas,
Strategy: canaryDep.Spec.Strategy,
ProgressDeadlineSeconds: canaryDep.Spec.ProgressDeadlineSeconds,
MinReadySeconds: canaryDep.Spec.MinReadySeconds,
RevisionHistoryLimit: canaryDep.Spec.RevisionHistoryLimit,
Replicas: canaryDep.Spec.Replicas,
Strategy: canaryDep.Spec.Strategy,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": primaryName,
@@ -322,26 +346,41 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
return nil
}
func (c *CanaryDeployer) getDeploymentStatus(deployment *appsv1.Deployment) (string, bool) {
// isDeploymentReady determines if a deployment is ready by checking the status conditions
// if a deployment has exceeded the progress deadline it returns a non-retriable error
func (c *CanaryDeployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
retriable := true
if deployment.Generation <= deployment.Status.ObservedGeneration {
cond := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
return fmt.Sprintf("deployment %q exceeded its progress deadline", deployment.GetName()), false
} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
return fmt.Sprintf("waiting for rollout to finish: %d out of %d new replicas have been updated",
deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false
} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
return fmt.Sprintf("waiting for rollout to finish: %d old replicas are pending termination",
deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false
} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
return fmt.Sprintf("waiting for rollout to finish: %d of %d updated replicas are available",
deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false
progress := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
if progress != nil {
// Determine if the deployment is stuck by checking if there is a minimum replicas unavailable condition
// and if the last update time exceeds the deadline
available := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable)
if available != nil && available.Status == "False" && available.Reason == "MinimumReplicasUnavailable" {
from := available.LastUpdateTime
delta := time.Duration(deadline) * time.Second
retriable = !from.Add(delta).Before(time.Now())
}
}
if progress != nil && progress.Reason == "ProgressDeadlineExceeded" {
return false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.GetName())
} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
return retriable, fmt.Errorf("waiting for rollout to finish: %d out of %d new replicas have been updated",
deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas)
} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
return retriable, fmt.Errorf("waiting for rollout to finish: %d old replicas are pending termination",
deployment.Status.Replicas-deployment.Status.UpdatedReplicas)
} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
return retriable, fmt.Errorf("waiting for rollout to finish: %d of %d updated replicas are available",
deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas)
}
} else {
return "waiting for rollout to finish: observed deployment generation less then desired generation", false
return true, fmt.Errorf("waiting for rollout to finish: observed deployment generation less then desired generation")
}
return "ready", true
return true, nil
}
func (c *CanaryDeployer) getDeploymentCondition(
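
The retriable flag above reduces to a single time comparison: a rollout stops being retriable once MinimumReplicasUnavailable has persisted longer than the progress deadline. A standalone sketch of that check, with illustrative condition values:

```go
package main

import (
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// stuckBeyondDeadline mirrors the isDeploymentReady deadline logic: true when
// the Available=False condition is older than the progress deadline.
func stuckBeyondDeadline(available appsv1.DeploymentCondition, deadlineSeconds int) bool {
	from := available.LastUpdateTime
	delta := time.Duration(deadlineSeconds) * time.Second
	return from.Add(delta).Before(time.Now())
}

func main() {
	cond := appsv1.DeploymentCondition{
		Type:           appsv1.DeploymentAvailable,
		Status:         "False",
		Reason:         "MinimumReplicasUnavailable",
		LastUpdateTime: metav1.NewTime(time.Now().Add(-2 * time.Minute)),
	}
	// With a 60s deadline, a condition that is two minutes old is stuck.
	fmt.Println(stuckBeyondDeadline(cond, 60)) // true
}
```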


@@ -3,7 +3,7 @@ package controller
import (
"testing"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/logging"
appsv1 "k8s.io/api/apps/v1"
@@ -14,14 +14,14 @@ import (
"k8s.io/client-go/kubernetes/fake"
)
func newTestCanary() *v1alpha1.Canary {
cd := &v1alpha1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha1.SchemeGroupVersion.String()},
func newTestCanary() *v1alpha2.Canary {
cd := &v1alpha2.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha1.CanarySpec{
Spec: v1alpha2.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
@@ -31,13 +31,13 @@ func newTestCanary() *v1alpha1.Canary {
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha1.CanaryService{
}, Service: v1alpha2.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha1.CanaryAnalysis{
}, CanaryAnalysis: v1alpha2.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha1.CanaryMetric{
Metrics: []v1alpha2.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
@@ -319,7 +319,12 @@ func TestCanaryDeployer_IsReady(t *testing.T) {
t.Fatal(err.Error())
}
err = deployer.IsReady(canary)
_, err = deployer.IsPrimaryReady(canary)
if err != nil {
t.Fatal(err.Error())
}
_, err = deployer.IsCanaryReady(canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -351,7 +356,7 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha1().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -382,18 +387,18 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
err = deployer.SetState(canary, "running")
err = deployer.SetState(canary, v1alpha2.CanaryRunning)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha1().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if res.Status.State != "running" {
t.Errorf("Got %v wanted %v", res.Status.State, "running")
if res.Status.State != v1alpha2.CanaryRunning {
t.Errorf("Got %v wanted %v", res.Status.State, v1alpha2.CanaryRunning)
}
}
@@ -418,8 +423,8 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
t.Fatal(err.Error())
}
status := v1alpha1.CanaryStatus{
State: "running",
status := v1alpha2.CanaryStatus{
State: v1alpha2.CanaryRunning,
FailedChecks: 2,
}
err = deployer.SyncStatus(canary, status)
@@ -427,7 +432,7 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha1().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}


@@ -0,0 +1,84 @@
package controller
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestCanaryObserver_GetDeploymentCounter(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
observer := CanaryObserver{
metricsServer: ts.URL,
}
val, err := observer.GetDeploymentCounter("podinfo", "default", "istio_requests_total", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100 {
t.Errorf("Got %v wanted %v", val, 100)
}
}
func TestCanaryObserver_GetDeploymentHistogram(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.596,"0.2"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
observer := CanaryObserver{
metricsServer: ts.URL,
}
val, err := observer.GetDeploymentHistogram("podinfo", "default", "istio_request_duration_seconds_bucket", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 200*time.Millisecond {
t.Errorf("Got %v wanted %v", val, 200*time.Millisecond)
}
}
func TestCheckMetricsServer(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json := `{"status":"success","data":{"config.file":"/etc/prometheus/prometheus.yml"}}`
w.Write([]byte(json))
}))
defer ts.Close()
ok, err := CheckMetricsServer(ts.URL)
if err != nil {
t.Fatal(err.Error())
}
if !ok {
t.Errorf("Got %v wanted %v", ok, true)
}
}
func TestCheckMetricsServer_Offline(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadGateway)
}))
defer ts.Close()
ok, err := CheckMetricsServer(ts.URL)
if err == nil {
t.Errorf("Got no error wanted %v", http.StatusBadGateway)
}
if ok {
t.Errorf("Got %v wanted %v", ok, false)
}
}
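
The fixtures above follow the Prometheus /api/v1/query instant-vector format; a minimal sketch of pulling the scalar out of such a response. The struct below is a local assumption for illustration, not Flagger's internal decoder.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strconv"
)

// vectorResponse models just enough of the Prometheus query API to read a
// single instant-vector sample.
type vectorResponse struct {
	Data struct {
		Result []struct {
			Value []interface{} `json:"value"` // [unix timestamp, "value as string"]
		} `json:"result"`
	} `json:"data"`
}

func main() {
	body := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`

	var res vectorResponse
	if err := json.Unmarshal([]byte(body), &res); err != nil {
		log.Fatal(err)
	}
	// The sample value is encoded as a string; parse it into a float.
	val, err := strconv.ParseFloat(res.Data.Result[0].Value[1].(string), 64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val) // 100
}
```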


@@ -5,7 +5,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
)
// CanaryRecorder records the canary analysis as Prometheus metrics
@@ -73,9 +73,9 @@ func (cr *CanaryRecorder) SetTotal(namespace string, total int) {
func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary) {
status := 1
switch cd.Status.State {
case "running":
case flaggerv1.CanaryRunning:
status = 0
case "failed":
case flaggerv1.CanaryFailed:
status = 2
default:
status = 1


@@ -5,7 +5,7 @@ import (
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"


@@ -4,7 +4,7 @@ import (
"fmt"
"time"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha1"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -32,7 +32,7 @@ func (c *Controller) scheduleCanaries() {
func (c *Controller) advanceCanary(name string, namespace string) {
begin := time.Now()
// check if the canary exists
cd, err := c.flaggerClient.FlaggerV1alpha1().Canaries(namespace).Get(name, v1.GetOptions{})
cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(namespace).Get(name, v1.GetOptions{})
if err != nil {
c.logger.Errorf("Canary %s.%s not found", name, namespace)
return
@@ -56,8 +56,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
maxWeight = cd.Spec.CanaryAnalysis.MaxWeight
}
// check primary and canary deployments status
if err := c.deployer.IsReady(cd); err != nil {
// check primary deployment status
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -81,10 +81,30 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recorder.SetDuration(cd, time.Since(begin))
}()
// check canary deployment status
retriable, err := c.deployer.IsCanaryReady(cd)
if err != nil && retriable {
c.recordEventWarningf(cd, "%v", err)
return
}
// check if the number of failed checks reached the threshold
if cd.Status.State == "running" && cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
c.recordEventWarningf(cd, "Rolling back %s.%s failed checks threshold reached %v",
cd.Name, cd.Namespace, cd.Status.FailedChecks)
if cd.Status.State == flaggerv1.CanaryRunning &&
(!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {
if cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
c.recordEventWarningf(cd, "Rolling back %s.%s failed checks threshold reached %v",
cd.Name, cd.Namespace, cd.Status.FailedChecks)
c.sendNotification(cd, fmt.Sprintf("Failed checks threshold reached %v", cd.Status.FailedChecks),
false, true)
}
if !retriable {
c.recordEventWarningf(cd, "Rolling back %s.%s progress deadline exceeded %v",
cd.Name, cd.Namespace, err)
c.sendNotification(cd, fmt.Sprintf("Progress deadline exceeded %v", err),
false, true)
}
// route all traffic back to primary
primaryRoute.Weight = 100
@@ -96,7 +116,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
c.recordEventWarningf(cd, "Canary failed! Scaling down %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace)
cd.Name, cd.Namespace)
// shutdown canary
if err := c.deployer.Scale(cd, 0); err != nil {
@@ -105,13 +125,12 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// mark canary as failed
if err := c.deployer.SetState(cd, "failed"); err != nil {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryFailed}); err != nil {
c.logger.Errorf("%v", err)
return
}
c.recorder.SetStatus(cd)
c.sendNotification(cd.Spec.TargetRef.Name, cd.Namespace,
"Canary analysis failed, rollback finished.", true)
return
}
@@ -120,7 +139,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
if canaryRoute.Weight == 0 {
c.recordEventInfof(cd, "Starting canary deployment for %s.%s", cd.Name, cd.Namespace)
} else {
if ok := c.checkCanaryMetrics(cd); !ok {
if ok := c.analyseCanary(cd); !ok {
if err := c.deployer.SetFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@@ -177,13 +196,13 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// update status
if err := c.deployer.SetState(cd, "finished"); err != nil {
if err := c.deployer.SetState(cd, flaggerv1.CanaryFinished); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetStatus(cd)
c.sendNotification(cd.Spec.TargetRef.Name, cd.Namespace,
"Canary analysis completed successfully, promotion finished.", false)
c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
false, false)
}
}
@@ -194,26 +213,26 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
}
if cd.Status.State == "" {
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: "initialized"}); err != nil {
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryInitialized}); err != nil {
c.logger.Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd)
c.recordEventInfof(cd, "Initialization done! %s.%s", cd.Name, cd.Namespace)
c.sendNotification(cd.Spec.TargetRef.Name, cd.Namespace,
"New deployment detected, initialization completed.", false)
c.sendNotification(cd, "New deployment detected, initialization completed.",
true, false)
return false
}
if diff, err := deployer.IsNewSpec(cd); diff {
c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd.Spec.TargetRef.Name, cd.Namespace,
"New revision detected, starting canary analysis.", false)
c.sendNotification(cd, "New revision detected, starting canary analysis.",
true, false)
if err = deployer.Scale(cd, 1); err != nil {
c.recordEventErrorf(cd, "%v", err)
return false
}
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: "running"}); err != nil {
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryRunning}); err != nil {
c.logger.Errorf("%v", err)
return false
}
@@ -223,7 +242,8 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
return false
}
func (c *Controller) checkCanaryMetrics(r *flaggerv1.Canary) bool {
func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
// run metrics checks
for _, metric := range r.Spec.CanaryAnalysis.Metrics {
if metric.Name == "istio_requests_total" {
val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
@@ -253,5 +273,15 @@ func (c *Controller) checkCanaryMetrics(r *flaggerv1.Canary) bool {
}
}
// run external checks
for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
err := CallWebhook(r.Name, r.Namespace, webhook)
if err != nil {
c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
r.Name, r.Namespace, webhook.Name, err)
return false
}
}
return true
}
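
Since analyseCanary simply iterates the webhooks registered in the spec, enabling an external check amounts to appending one to the analysis. A hedged sketch; the URL and metadata are placeholders, only the field names come from the v1alpha2 types in this diff:

```go
package controller

import (
	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
)

// withExternalCheck appends a webhook to a canary's analysis spec; CallWebhook
// will POST the canary name, namespace and this metadata to the URL on every
// analysis iteration and halt the advancement on a failing status code.
func withExternalCheck(cd *flaggerv1.Canary) {
	cd.Spec.CanaryAnalysis.Webhooks = append(cd.Spec.CanaryAnalysis.Webhooks,
		flaggerv1.CanaryWebhook{
			Name:     "integration-test",
			URL:      "http://tester.test/", // placeholder endpoint
			Timeout:  "30s",
			Metadata: &map[string]string{"suite": "smoke"},
		})
}
```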


@@ -6,6 +6,7 @@ import (
"time"
fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/logging"
@@ -46,7 +47,7 @@ func TestScheduler_Init(t *testing.T) {
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha1().Canaries()
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
@@ -100,7 +101,7 @@ func TestScheduler_NewRevision(t *testing.T) {
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha1().Canaries()
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
@@ -142,3 +143,71 @@ func TestScheduler_NewRevision(t *testing.T) {
t.Errorf("Got canary replicas %v wanted %v", *c.Spec.Replicas, 1)
}
}
func TestScheduler_Rollback(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
deployer := CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
router := CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
observer := CanaryObserver{
metricsServer: "fake",
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
// init
ctrl.advanceCanary("podinfo", "default")
// update failed checks to max
err := deployer.SyncStatus(canary, v1alpha2.CanaryStatus{State: v1alpha2.CanaryRunning, FailedChecks: 11})
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default")
c, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.State != v1alpha2.CanaryFailed {
t.Errorf("Got canary state %v wanted %v", c.Status.State, v1alpha2.CanaryFailed)
}
}

pkg/controller/webhook.go (new file)

@@ -0,0 +1,70 @@
package controller
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// CallWebhook does an HTTP POST to an external service and
// returns an error if the response status code is greater than 202
func CallWebhook(name string, namespace string, w flaggerv1.CanaryWebhook) error {
payload := flaggerv1.CanaryWebhookPayload{
Name: name,
Namespace: namespace,
Metadata: w.Metadata,
}
payloadBin, err := json.Marshal(payload)
if err != nil {
return err
}
hook, err := url.Parse(w.URL)
if err != nil {
return err
}
req, err := http.NewRequest("POST", hook.String(), bytes.NewBuffer(payloadBin))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
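// fall back to the 10s default when no usable timeout is set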
if len(w.Timeout) < 2 {
w.Timeout = "10s"
}
timeout, err := time.ParseDuration(w.Timeout)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(req.Context(), timeout)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return err
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return fmt.Errorf("error reading body: %s", err.Error())
}
if r.StatusCode > 202 {
return errors.New(string(b))
}
return nil
}
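
For reference, a sketch of what CallWebhook puts on the wire, assuming conventional lowercase JSON tags on CanaryWebhookPayload; the tags themselves are not shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// payload mirrors flaggerv1.CanaryWebhookPayload with assumed JSON tags.
type payload struct {
	Name      string             `json:"name"`
	Namespace string             `json:"namespace"`
	Metadata  *map[string]string `json:"metadata,omitempty"`
}

func main() {
	meta := map[string]string{"key1": "val1"}
	b, _ := json.Marshal(payload{Name: "podinfo", Namespace: "default", Metadata: &meta})
	fmt.Println(string(b))
	// {"name":"podinfo","namespace":"default","metadata":{"key1":"val1"}}
}
```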


@@ -0,0 +1,42 @@
package controller
import (
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
"net/http"
"net/http/httptest"
"testing"
)
func TestCallWebhook(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
}))
defer ts.Close()
hook := flaggerv1.CanaryWebhook{
Name: "validation",
URL: ts.URL,
Timeout: "10s",
Metadata: &map[string]string{"key1": "val1"},
}
err := CallWebhook("podinfo", "default", hook)
if err != nil {
t.Fatal(err.Error())
}
}
func TestCallWebhook_StatusCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}))
defer ts.Close()
hook := flaggerv1.CanaryWebhook{
Name: "validation",
URL: ts.URL,
}
err := CallWebhook("podinfo", "default", hook)
if err == nil {
t.Errorf("Got no error wanted %v", http.StatusInternalServerError)
}
}


@@ -30,10 +30,17 @@ type SlackPayload struct {
// SlackAttachment holds the markdown message body
type SlackAttachment struct {
Color string `json:"color"`
AuthorName string `json:"author_name"`
Text string `json:"text"`
MrkdwnIn []string `json:"mrkdwn_in"`
Color string `json:"color"`
AuthorName string `json:"author_name"`
Text string `json:"text"`
MrkdwnIn []string `json:"mrkdwn_in"`
Fields []SlackField `json:"fields"`
}
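// SlackField holds a title/value pair rendered inside a message attachment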
type SlackField struct {
Title string `json:"title"`
Value string `json:"value"`
Short bool `json:"short"`
}
// NewSlack validates the Slack URL and returns a Slack object
@@ -60,7 +67,7 @@ func NewSlack(hookURL string, username string, channel string) (*Slack, error) {
}
// Post Slack message
func (s *Slack) Post(workload string, namespace string, message string, warn bool) error {
func (s *Slack) Post(workload string, namespace string, message string, fields []SlackField, warn bool) error {
payload := SlackPayload{
Channel: s.Channel,
Username: s.Username,
@@ -76,6 +83,7 @@ func (s *Slack) Post(workload string, namespace string, message string, warn boo
AuthorName: fmt.Sprintf("%s.%s", workload, namespace),
Text: message,
MrkdwnIn: []string{"text"},
Fields: fields,
}
payload.Attachments = []SlackAttachment{a}
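
To see the resulting wire format, one can marshal an attachment built the same way Post does; the color, author and field values below are illustrative, the JSON tags come from the struct definitions above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/stefanprodan/flagger/pkg/notifier"
)

func main() {
	a := notifier.SlackAttachment{
		Color:      "danger",
		AuthorName: "podinfo.default",
		Text:       "Failed checks threshold reached 10",
		MrkdwnIn:   []string{"text"},
		Fields: []notifier.SlackField{
			{Title: "Target", Value: "Deployment/podinfo.default"},
		},
	}
	b, err := json.MarshalIndent(a, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```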


@@ -1,4 +1,4 @@
package version
var VERSION = "0.1.0"
var VERSION = "0.2.0"
var REVISION = "unknown"