Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-16 02:49:51 +00:00

Compare commits (14 commits)
| SHA1 |
|---|
| e7fc72e6b5 |
| 4203232b05 |
| a06aa05201 |
| 8e582e9b73 |
| 0e9fe8a446 |
| 27b4bcc648 |
| 614b7c74c4 |
| 5901129ec6 |
| ded14345b4 |
| dd272c6870 |
| b31c7c6230 |
| b0297213c3 |
| d0fba2d111 |
| 9924cc2152 |
CHANGELOG.md (12)
@@ -2,6 +2,18 @@

All notable changes to this project are documented in this file.

## 0.20.2 (2019-11-07)

Adds support for exposing canaries outside the cluster using App Mesh Gateway annotations

#### Improvements

- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/weaveworks/flagger/pull/358)

#### Fixes

- Use the specified replicas when scaling up the canary [#363](https://github.com/weaveworks/flagger/pull/363)
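The App Mesh Gateway change listed above works by copying the Canary's service hosts, timeout and retries into annotations on the primary virtual service. A minimal YAML sketch of that mapping, with an illustrative host value and the annotation keys taken from the gatewayAnnotations code further down in this diff:

```yaml
# Hypothetical Canary service section; the host value is an example.
service:
  port: 9898
  hosts:
  - app.example.com
  timeout: "25"
  retries:
    attempts: 5
# Annotations that gatewayAnnotations() (shown later in this diff) would set
# on the generated primary virtual service:
#   gateway.appmesh.k8s.aws/expose: "true"
#   gateway.appmesh.k8s.aws/domain: "app.example.com,"   # trailing comma comes from the join loop
#   gateway.appmesh.k8s.aws/timeout: "25"
#   gateway.appmesh.k8s.aws/retries: "5"
```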
## 0.20.1 (2019-11-03)

Fixes promql execution and updates the load testing tools
@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: abtest
  namespace: test
  labels:
    app: abtest
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: abtest
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: abtest
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: blue
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
          limits:
            cpu: 2000m
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 64Mi
@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: abtest
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: abtest
  minReplicas: 2
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
@@ -1,14 +1,14 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: abtest
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: abtest
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rollback (default 600s)
  progressDeadlineSeconds: 60
@@ -16,7 +16,7 @@ spec:
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: abtest
    name: podinfo
  service:
    # container port
    port: 9898
@@ -26,7 +26,12 @@ spec:
    - mesh
    # Istio virtual service host names (optional)
    hosts:
    - abtest.istio.weavedx.com
    - app.example.com
    # Istio traffic policy (optional)
    trafficPolicy:
      tls:
        # use ISTIO_MUTUAL when mTLS is enabled
        mode: DISABLE
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
@@ -37,7 +37,7 @@ spec:
    - mesh
    # Istio virtual service host names (optional)
    hosts:
    - app.istio.weavedx.com
    - app.example.com
    # Istio traffic policy (optional)
    trafficPolicy:
      tls:
@@ -22,7 +22,7 @@
      serviceAccountName: flagger
      containers:
      - name: flagger
        image: weaveworks/flagger:0.20.1
        image: weaveworks/flagger:0.20.2
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -23,8 +23,10 @@ spec:
  # to make progress before it is rollback (default 600s)
  progressDeadlineSeconds: 60
  service:
    # container port
    port: 9898
    # ClusterIP port number
    port: 80
    # container port number or name
    targetPort: 9898
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
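With the port/targetPort split shown above, the ClusterIP services that Flagger generates can listen on port 80 while still forwarding to the container port. A rough sketch of such a Service, assuming the podinfo names and an app selector borrowed from the manifests elsewhere on this page:

```yaml
# Sketch only: name and selector are assumptions, not Flagger's exact output.
apiVersion: v1
kind: Service
metadata:
  name: podinfo
  namespace: test
spec:
  type: ClusterIP
  selector:
    app: podinfo-primary
  ports:
  - name: http
    port: 80          # ClusterIP port number (spec.service.port)
    targetPort: 9898  # container port number or name (spec.service.targetPort)
```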
@@ -1,69 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: green
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 2
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          failureThreshold: 3
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 2
        resources:
          limits:
            cpu: 1000m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 16Mi
@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
@@ -1,131 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: trafficsplits.split.smi-spec.io
spec:
  additionalPrinterColumns:
  - JSONPath: .spec.service
    description: The service
    name: Service
    type: string
  group: split.smi-spec.io
  names:
    kind: TrafficSplit
    listKind: TrafficSplitList
    plural: trafficsplits
    singular: trafficsplit
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: smi-adapter-istio
  namespace: istio-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: smi-adapter-istio
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  - endpoints
  - persistentvolumeclaims
  - events
  - configmaps
  - secrets
  verbs:
  - '*'
- apiGroups:
  - apps
  resources:
  - deployments
  - daemonsets
  - replicasets
  - statefulsets
  verbs:
  - '*'
- apiGroups:
  - monitoring.coreos.com
  resources:
  - servicemonitors
  verbs:
  - get
  - create
- apiGroups:
  - apps
  resourceNames:
  - smi-adapter-istio
  resources:
  - deployments/finalizers
  verbs:
  - update
- apiGroups:
  - split.smi-spec.io
  resources:
  - '*'
  verbs:
  - '*'
- apiGroups:
  - networking.istio.io
  resources:
  - '*'
  verbs:
  - '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: smi-adapter-istio
subjects:
- kind: ServiceAccount
  name: smi-adapter-istio
  namespace: istio-system
roleRef:
  kind: ClusterRole
  name: smi-adapter-istio
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: smi-adapter-istio
  namespace: istio-system
spec:
  replicas: 1
  selector:
    matchLabels:
      name: smi-adapter-istio
  template:
    metadata:
      labels:
        name: smi-adapter-istio
      annotations:
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: smi-adapter-istio
      containers:
      - name: smi-adapter-istio
        image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
        command:
        - smi-adapter-istio
        imagePullPolicy: Always
        env:
        - name: WATCH_NAMESPACE
          value: ""
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: OPERATOR_NAME
          value: "smi-adapter-istio"
@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
version: 0.20.1
appVersion: 0.20.1
version: 0.20.2
appVersion: 0.20.2
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
@@ -2,7 +2,7 @@

image:
  repository: weaveworks/flagger
  tag: 0.20.1
  tag: 0.20.2
  pullPolicy: IfNotPresent
  pullSecret:
@@ -535,7 +535,7 @@ The canary analysis can be extended with webhooks. Flagger will call each webhoo
determine from the response status code (HTTP 2xx) if the canary is failing or not.

There are three types of hooks:
* Confirm-rollout hooks are executed before scaling up the canary deployment and ca be used for manual approval.
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
The rollout is paused until the hook returns a successful HTTP status code.
* Pre-rollout hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reach the
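The hook types described in the hunk above are declared under canaryAnalysis.webhooks. A minimal sketch with illustrative names and URLs, assuming the confirm-rollout and pre-rollout type values described in these docs:

```yaml
# Illustrative only: webhook names and URLs are placeholders.
canaryAnalysis:
  webhooks:
  - name: manual-gate
    type: confirm-rollout            # pauses the rollout until this URL returns 2xx
    url: http://my-approval-service.test/approve
  - name: acceptance-test
    type: pre-rollout                # runs before traffic is routed to the canary
    url: http://flagger-loadtester.test/
    timeout: 30s
    metadata:
      type: bash
      cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
```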
@@ -4,42 +4,16 @@ This guide shows you how to use the SMI Istio adapter and Flagger to automate ca

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* Kubernetes > 1.13
* Istio > 1.0

* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
### Install Istio SMI adapter

Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.

A minimal Istio installation should contain the following services:

* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus

### Install Istio and the SMI adapter

Add Istio Helm repository:
Install the SMI adapter:

```bash
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
```

Install Istio CRDs:

```bash
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system

kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
```

Install Istio:

```bash
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/crds/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/operator-and-rbac.yaml
```

Create a generic Istio gateway to expose services outside the mesh on HTTP:
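The gateway manifest itself is outside this diff; a minimal sketch of the kind of HTTP gateway the line above refers to, assuming an illustrative name and the stock Istio ingress gateway selector:

```yaml
# Sketch of a generic public gateway; the name is an assumption.
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
```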
@@ -74,14 +48,6 @@ Find the Gateway load balancer IP and add a DNS record for it:
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```

Install the SMI adapter:

```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
```

### Install Flagger and Grafana

Add Flagger Helm repository:
@@ -95,7 +61,6 @@ Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set image.tag=master-12d84b2 \
--set meshProvider=smi:istio
```
@@ -119,24 +84,23 @@ kubectl -n istio-system port-forward svc/flagger-grafana 3000:80

Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
Create a test namespace and enable Linkerd proxy injection:

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```bash
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```

Create a canary custom resource (replace example.com with your own domain):
@@ -236,7 +200,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=quay.io/stefanprodan/podinfo:3.1.1
```

Flagger detects that the deployment revision changed and starts a new rollout:
@@ -287,7 +251,7 @@ Create a tester pod and exec into it:

```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
--image=quay.io/stefanprodan/podinfo:3.1.2 \
-- ./podinfo --port=9898

kubectl -n test exec -it tester-xx-xx sh
@@ -13,23 +13,20 @@ This is particularly useful for frontend applications that require session affin
Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/ab-testing/deployment.yaml
kubectl apply -f ${REPO}/artifacts/ab-testing/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```

Create a canary custom resource (replace example.com with your own domain):
@@ -38,14 +35,14 @@ Create a canary custom resource (replace example.com with your own domain):
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: abtest
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: abtest
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rollback (default 600s)
  progressDeadlineSeconds: 60
@@ -53,7 +50,7 @@ spec:
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: abtest
    name: podinfo
  service:
    # container port
    port: 9898
@@ -63,6 +60,11 @@ spec:
    # Istio virtual service host names (optional)
    hosts:
    - app.example.com
    # Istio traffic policy (optional)
    trafficPolicy:
      tls:
        # use ISTIO_MUTUAL when mTLS is enabled
        mode: DISABLE
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
@@ -110,19 +112,19 @@ After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/abtest
horizontalpodautoscaler.autoscaling/abtest
canary.flagger.app/abtest
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/abtest-primary
horizontalpodautoscaler.autoscaling/abtest-primary
service/abtest
service/abtest-canary
service/abtest-primary
destinationrule.networking.istio.io/abtest-canary
destinationrule.networking.istio.io/abtest-primary
virtualservice.networking.istio.io/abtest
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
destinationrule.networking.istio.io/podinfo-canary
destinationrule.networking.istio.io/podinfo-primary
virtualservice.networking.istio.io/podinfo
```

### Automated canary promotion
@@ -145,22 +147,22 @@ Status:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected abtest.test
Normal Synced 3m flagger Scaling up abtest.test
Warning Synced 3m flagger Waiting for abtest.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 2m flagger Advance abtest.test canary iteration 4/10
Normal Synced 2m flagger Advance abtest.test canary iteration 5/10
Normal Synced 1m flagger Advance abtest.test canary iteration 6/10
Normal Synced 1m flagger Advance abtest.test canary iteration 7/10
Normal Synced 55s flagger Advance abtest.test canary iteration 8/10
Normal Synced 45s flagger Advance abtest.test canary iteration 9/10
Normal Synced 35s flagger Advance abtest.test canary iteration 10/10
Normal Synced 25s flagger Copying abtest.test template spec to abtest-primary.test
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 4/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 5/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 6/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 7/10
Normal Synced 55s flagger Advance podinfo.test canary iteration 8/10
Normal Synced 45s flagger Advance podinfo.test canary iteration 9/10
Normal Synced 35s flagger Advance podinfo.test canary iteration 10/10
Normal Synced 25s flagger Copying podinfo.test template spec to abtest-primary.test
Warning Synced 15s flagger Waiting for abtest-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down abtest.test
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
@@ -204,12 +206,12 @@ Status:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for abtest.test
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 3m flagger Halt abtest.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt abtest.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back abtest.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down abtest.test
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back podinfo.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
@@ -102,12 +102,12 @@ spec:
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo:9898/"
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
```

Save the above resource as podinfo-canary.yaml and then apply it:
@@ -56,8 +56,7 @@ kubectl create ns test

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:
@@ -13,23 +13,20 @@ These objects expose the application inside the mesh and drive the canary analys
Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```

Create a canary custom resource (replace example.com with your own domain):
@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
  newTag: 0.20.1
  newTag: 0.20.2
@@ -185,6 +185,30 @@ func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
	return nil
}

func (c *Deployer) ScaleUp(cd *flaggerv1.Canary) error {
	targetName := cd.Spec.TargetRef.Name
	dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
		}
		return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
	}

	replicas := int32p(1)
	if dep.Spec.Replicas != nil && *dep.Spec.Replicas > 0 {
		replicas = dep.Spec.Replicas
	}
	depCopy := dep.DeepCopy()
	depCopy.Spec.Replicas = replicas

	_, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
	if err != nil {
		return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
	}
	return nil
}

func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, map[string]int32, error) {
	targetName := cd.Spec.TargetRef.Name
	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
@@ -622,7 +622,7 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool)
		c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
		c.sendNotification(cd, "New revision detected, starting canary analysis.",
			true, false)
		if err := c.deployer.Scale(cd, 1); err != nil {
		if err := c.deployer.ScaleUp(cd); err != nil {
			c.recordEventErrorf(cd, "%v", err)
			return false
		}
@@ -817,8 +817,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
		val, err := observerFactory.Client.RunQuery(metric.Query)
		if err != nil {
			if strings.Contains(err.Error(), "no values found") {
				c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
					metric.Name, r.Spec.TargetRef.Name, r.Namespace)
				c.recordEventWarningf(r, "Halt advancement no values found for custom metric: %s",
					metric.Name)
			} else {
				c.recordEventErrorf(r, "Metrics server %s query failed for %s: %v", metricsServer, metric.Name, err)
			}
@@ -2,6 +2,7 @@ package router

import (
	"fmt"
	"strconv"
	"strings"
	"time"
@@ -80,7 +81,7 @@ func (ar *AppMeshRouter) Reconcile(canary *flaggerv1.Canary) error {
// reconcileVirtualNode creates or updates a virtual node
// the virtual node naming format is name-role-namespace
func (ar *AppMeshRouter) reconcileVirtualNode(canary *flaggerv1.Canary, name string, host string) error {
	protocol := getProtocol(canary)
	protocol := ar.getProtocol(canary)
	vnSpec := appmeshv1.VirtualNodeSpec{
		MeshName: canary.Spec.Service.MeshName,
		Listeners: []appmeshv1.Listener{
@@ -164,7 +165,7 @@ func (ar *AppMeshRouter) reconcileVirtualService(canary *flaggerv1.Canary, name
	targetName := canary.Spec.TargetRef.Name
	canaryVirtualNode := fmt.Sprintf("%s-canary", targetName)
	primaryVirtualNode := fmt.Sprintf("%s-primary", targetName)
	protocol := getProtocol(canary)
	protocol := ar.getProtocol(canary)

	routerName := targetName
	if canaryWeight > 0 {
@@ -212,7 +213,7 @@ func (ar *AppMeshRouter) reconcileVirtualService(canary *flaggerv1.Canary, name
			Http: &appmeshv1.HttpRoute{
				Match: appmeshv1.HttpRouteMatch{
					Prefix:  routePrefix,
					Headers: makeHeaders(canary),
					Headers: ar.makeHeaders(canary),
				},
				RetryPolicy: makeRetryPolicy(canary),
				Action: appmeshv1.HttpRouteAction{
@@ -284,6 +285,15 @@ func (ar *AppMeshRouter) reconcileVirtualService(canary *flaggerv1.Canary, name
			},
			Spec: vsSpec,
		}

		// set App Mesh Gateway annotation on primary virtual service
		if canaryWeight == 0 {
			a := ar.gatewayAnnotations(canary)
			if len(a) > 0 {
				virtualService.ObjectMeta.Annotations = a
			}
		}

		_, err = ar.appmeshClient.AppmeshV1beta1().VirtualServices(canary.Namespace).Create(virtualService)
		if err != nil {
			return fmt.Errorf("VirtualService %s create error %v", name, err)
@@ -304,6 +314,14 @@ func (ar *AppMeshRouter) reconcileVirtualService(canary *flaggerv1.Canary, name
	vsClone.Spec = vsSpec
	vsClone.Spec.Routes[0].Http.Action = virtualService.Spec.Routes[0].Http.Action

	// update App Mesh Gateway annotation on primary virtual service
	if canaryWeight == 0 {
		a := ar.gatewayAnnotations(canary)
		if len(a) > 0 {
			vsClone.ObjectMeta.Annotations = a
		}
	}

	_, err = ar.appmeshClient.AppmeshV1beta1().VirtualServices(canary.Namespace).Update(vsClone)
	if err != nil {
		return fmt.Errorf("VirtualService %s update error %v", name, err)
@@ -432,7 +450,7 @@ func makeRetryPolicy(canary *flaggerv1.Canary) *appmeshv1.HttpRetryPolicy {
}

// makeHeaders creates an App Mesh HttpRouteHeader from the Canary.CanaryAnalysis.Match
func makeHeaders(canary *flaggerv1.Canary) []appmeshv1.HttpRouteHeader {
func (ar *AppMeshRouter) makeHeaders(canary *flaggerv1.Canary) []appmeshv1.HttpRouteHeader {
	headers := []appmeshv1.HttpRouteHeader{}

	for _, m := range canary.Spec.CanaryAnalysis.Match {
@@ -453,13 +471,32 @@ func makeHeaders(canary *flaggerv1.Canary) []appmeshv1.HttpRouteHeader {
	return headers
}

func getProtocol(canary *flaggerv1.Canary) string {
func (ar *AppMeshRouter) getProtocol(canary *flaggerv1.Canary) string {
	if strings.Contains(canary.Spec.Service.PortName, "grpc") {
		return "grpc"
	}
	return "http"
}

func (ar *AppMeshRouter) gatewayAnnotations(canary *flaggerv1.Canary) map[string]string {
	a := make(map[string]string)
	domains := ""
	for _, value := range canary.Spec.Service.Hosts {
		domains += value + ","
	}
	if domains != "" {
		a["gateway.appmesh.k8s.aws/expose"] = "true"
		a["gateway.appmesh.k8s.aws/domain"] = domains
		if canary.Spec.Service.Timeout != "" {
			a["gateway.appmesh.k8s.aws/timeout"] = canary.Spec.Service.Timeout
		}
		if canary.Spec.Service.Retries != nil && canary.Spec.Service.Retries.Attempts > 0 {
			a["gateway.appmesh.k8s.aws/retries"] = strconv.Itoa(canary.Spec.Service.Retries.Attempts)
		}
	}
	return a
}

func int64p(i int64) *int64 {
	return &i
}
@@ -2,6 +2,8 @@ package router

import (
	"fmt"
	"strconv"
	"strings"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -226,3 +228,45 @@ func TestAppmeshRouter_ABTest(t *testing.T) {
		t.Errorf("Got http match header exact %v wanted %v", exactMatch, "test")
	}
}

func TestAppmeshRouter_Gateway(t *testing.T) {
	mocks := setupfakeClients()
	router := &AppMeshRouter{
		logger:        mocks.logger,
		flaggerClient: mocks.flaggerClient,
		appmeshClient: mocks.meshClient,
		kubeClient:    mocks.kubeClient,
	}

	err := router.Reconcile(mocks.appmeshCanary)
	if err != nil {
		t.Fatal(err.Error())
	}

	// check virtual service
	vsName := fmt.Sprintf("%s.%s", mocks.appmeshCanary.Spec.TargetRef.Name, mocks.appmeshCanary.Namespace)
	vs, err := router.appmeshClient.AppmeshV1beta1().VirtualServices("default").Get(vsName, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	expose := vs.Annotations["gateway.appmesh.k8s.aws/expose"]
	if expose != "true" {
		t.Errorf("Got gateway expose annotation %v wanted %v", expose, "true")
	}

	domain := vs.Annotations["gateway.appmesh.k8s.aws/domain"]
	if !strings.Contains(domain, mocks.appmeshCanary.Spec.Service.Hosts[0]) {
		t.Errorf("Got gateway domain annotation %v wanted %v", domain, mocks.appmeshCanary.Spec.Service.Hosts[0])
	}

	timeout := vs.Annotations["gateway.appmesh.k8s.aws/timeout"]
	if timeout != mocks.appmeshCanary.Spec.Service.Timeout {
		t.Errorf("Got gateway timeout annotation %v wanted %v", timeout, mocks.appmeshCanary.Spec.Service.Timeout)
	}

	retries := vs.Annotations["gateway.appmesh.k8s.aws/retries"]
	if retries != strconv.Itoa(mocks.appmeshCanary.Spec.Service.Retries.Attempts) {
		t.Errorf("Got gateway retries annotation %v wanted %v", retries, strconv.Itoa(mocks.appmeshCanary.Spec.Service.Retries.Attempts))
	}
}
@@ -70,7 +70,14 @@ func newMockCanaryAppMesh() *flaggerv1.Canary {
			Service: flaggerv1.CanaryService{
				Port:     9898,
				MeshName: "global",
				Hosts:    []string{"*"},
				Backends: []string{"backend.default"},
				Timeout:  "25",
				Retries: &istiov1alpha3.HTTPRetry{
					Attempts:      5,
					PerTryTimeout: "gateway-error",
					RetryOn:       "5s",
				},
			}, CanaryAnalysis: flaggerv1.CanaryAnalysis{
				Threshold:  10,
				StepWeight: 10,
@@ -1,4 +1,4 @@
package version

var VERSION = "0.20.1"
var VERSION = "0.20.2"
var REVISION = "unknown"