Compare commits

...

54 Commits
0.3.0 ... 0.5.0

Author SHA1 Message Date
stefanprodan
456d914c35 Release v0.5.0 2019-01-30 14:54:03 +02:00
Stefan Prodan
737507b0fe Merge pull request #37 from stefanprodan/track-configs
Track changes in ConfigMaps and Secrets
2019-01-30 13:46:56 +01:00
stefanprodan
4bcf82d295 Copy annotations from canary to primary on promotion 2019-01-28 11:02:33 +02:00
stefanprodan
e9cd7afc8a Add config change tracking to docs 2019-01-28 10:50:30 +02:00
stefanprodan
0830abd51d Trigger a rolling update when configs change
- generate a unique pod annotation on promotion
2019-01-28 10:49:43 +02:00
stefanprodan
5b296e01b3 Detect changes in configs and trigger canary analysis
- restart analysis if a ConfigMap or Secret changes during rollout
- add tests for tracked changes
2019-01-26 12:36:27 +02:00
stefanprodan
3fd039afd1 Add tracked configs checksum to canary status 2019-01-26 12:33:15 +02:00
stefanprodan
5904348ba5 Refactor tests
- consolidate fake clients and mock objects
2019-01-26 00:39:33 +02:00
stefanprodan
1a98e93723 Add config and secret volumes tests 2019-01-25 23:47:50 +02:00
stefanprodan
c9685fbd13 Add ConfigMap env from source tests 2019-01-25 18:58:23 +02:00
stefanprodan
dc347e273d Add secrets from env tests 2019-01-25 18:27:05 +02:00
stefanprodan
8170916897 Add ConfigMap tracking tests 2019-01-25 18:03:36 +02:00
stefanprodan
71cd4e0cb7 Include ConfigMaps and Secrets in promotion
- create primary configs and secrets at bootstrap
- copy configs and secrets from canary to primary and update the pod spec on promotion
2019-01-25 16:03:51 +02:00
stefanprodan
0109788ccc Discover config maps and secrets
- scan target deployment volumes and containers for configmaps and secrets
2019-01-25 13:20:46 +02:00
stefanprodan
1649dea468 Add config maps and secrets manifests for testing 2019-01-25 11:19:34 +02:00
Stefan Prodan
b8a7ea8534 Merge pull request #35 from stefanprodan/gh-actions
Publish charts with GitHub Actions
2019-01-24 11:52:54 +01:00
stefanprodan
afe4d59d5a Move Helm repository to gh-pages branch 2019-01-24 12:47:36 +02:00
stefanprodan
0f2697df23 Publish charts with GitHub Actions 2019-01-24 12:38:45 +02:00
stefanprodan
05664fa648 Release v0.4.1 2019-01-24 12:17:37 +02:00
Stefan Prodan
3b2564f34b Merge pull request #33 from stefanprodan/loadtest
Add load testing service
2019-01-24 11:04:31 +01:00
stefanprodan
dd0cf2d588 Add load tester dockerfile to docs 2019-01-23 15:12:23 +02:00
stefanprodan
7c66f23c6a Add load tester Helm chart 2019-01-21 21:02:40 +02:00
stefanprodan
a9f034de1a Add load testing diagram 2019-01-21 18:02:44 +02:00
stefanprodan
6ad2dca57a Add load testing setup to docs 2019-01-21 17:29:04 +02:00
stefanprodan
e8353c110b Release load tester v0.0.2 2019-01-21 13:37:26 +02:00
stefanprodan
dbf26ddf53 Add load tester flag to log the cmd output 2019-01-21 13:36:08 +02:00
stefanprodan
acc72d207f Change container image tag format 2019-01-20 17:27:08 +02:00
stefanprodan
a784f83464 Add loadtester manifests 2019-01-20 15:59:41 +02:00
stefanprodan
07d8355363 Rename load testing service to flagger-loadtester 2019-01-20 14:28:45 +02:00
stefanprodan
f7a439274e Go format API types 2019-01-20 14:10:10 +02:00
stefanprodan
bd6d446cb8 Go format scheduler 2019-01-20 14:04:10 +02:00
stefanprodan
385d0e0549 Add load test runner service
- embed rakyll/hey in the runner container image
2019-01-20 14:00:14 +02:00
stefanprodan
02236374d8 Run the webhooks before the metrics checks
- log a warning when no values are found for an Istio metric due to lack of traffic
2019-01-20 13:54:44 +02:00
stefanprodan
c46fe55ad0 Release v0.4.0 2019-01-18 12:49:36 +02:00
Stefan Prodan
36a54fbf2a Merge pull request #31 from stefanprodan/reset
Restart analysis if revision changes during validation
2019-01-18 10:25:38 +01:00
stefanprodan
60f6b05397 Refactor scheduler tests 2019-01-18 11:14:27 +02:00
stefanprodan
6d8a7343b7 Add tests for analysis restart and canary promotion 2019-01-18 11:05:40 +02:00
stefanprodan
aff8b117d4 Restart validation if revision changes during analysis 2019-01-17 15:13:59 +02:00
Stefan Prodan
1b3c3b22b3 Merge pull request #29 from stefanprodan/status
Use Kubernetes 1.11 CRD status sub-resource
2019-01-17 13:06:28 +01:00
stefanprodan
1d31b5ed90 Add canary name and namespace to controller logs
- zap key-value: canary=name.namespace
2019-01-17 13:58:10 +02:00
stefanprodan
1ef310f00d Add traffic weight to canary status
- show current weight on kubectl get canaries and kubectl get all
2019-01-16 16:29:59 +02:00
stefanprodan
acdd2c46d5 Refactor Canary status
- add status phases (Initialized, Progressing, Succeeded, Failed)
- rename status revision to LastAppliedSpec
2019-01-16 15:06:38 +02:00
stefanprodan
9872e6bc16 Skip readiness checks if canary analysis finished 2019-01-16 13:18:53 +02:00
stefanprodan
10c2bdec86 Use deep copy when updating the virtual service routes 2019-01-16 13:13:07 +02:00
stefanprodan
4bf3b70048 Use CRD UpdateStatus for Canary status updates
- requires Kubernetes >=1.11
2019-01-16 01:00:39 +02:00
stefanprodan
ada446bbaa Drop compatibility with Kubernetes 1.10 2019-01-16 00:58:51 +02:00
stefanprodan
c4981ef4db Add status and additional printer columns to CRD 2019-01-16 00:57:46 +02:00
Stefan Prodan
d1b84cd31d Merge pull request #28 from stefanprodan/naming
Fix for when canary name is different to the target name
2019-01-15 23:32:41 +01:00
stefanprodan
9232c8647a Check if multiple canaries have the same target
- log an error on target duplication ref #13
2019-01-15 21:43:05 +02:00
stefanprodan
23e8c7d616 Fix for when canary name is different to the target name
- use the target name consistently at bootstrap
2019-01-15 21:18:46 +02:00
Stefan Prodan
42607fbd64 Merge pull request #26 from carlossg/service-name
Fix VirtualService routes
2019-01-15 19:38:38 +01:00
stefanprodan
28781a5f02 Use deep copy when updating the deployment object
- fix canary status update logs
2019-01-15 20:37:14 +02:00
stefanprodan
3589e11244 Bump dev version 2019-01-15 20:36:59 +02:00
Carlos Sanchez
5e880d3942 Wrong VirtualService routes
If the deployment name is different from the canary name,
the virtual service routes are created with the canary name
but the services are created with the deployment name.

Note that the canary name should match the deployment name
2019-01-15 18:44:50 +01:00
62 changed files with 2735 additions and 984 deletions

.github/main.workflow

@@ -0,0 +1,17 @@
workflow "Publish Helm charts" {
on = "push"
resolves = ["helm-push"]
}
action "helm-lint" {
uses = "stefanprodan/gh-actions/helm@master"
args = ["lint charts/*"]
}
action "helm-push" {
needs = ["helm-lint"]
uses = "stefanprodan/gh-actions/helm-gh-pages@master"
args = ["charts/*","https://flagger.app"]
secrets = ["GITHUB_TOKEN"]
}


@@ -23,9 +23,10 @@ after_success:
- if [ -z "$DOCKER_USER" ]; then
echo "PR build, skipping image push";
else
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
fi
- if [ -z "$TRAVIS_TAG" ]; then
echo "Not a release, skipping image push";

Dockerfile.loadtester

@@ -0,0 +1,44 @@
FROM golang:1.11 AS hey-builder
RUN mkdir -p /go/src/github.com/rakyll/hey/
WORKDIR /go/src/github.com/rakyll/hey
ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .
RUN tar xzf v0.1.1.tar.gz --strip 1
RUN go get ./...
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go install -ldflags '-w -extldflags "-static"' \
/go/src/github.com/rakyll/hey
FROM golang:1.11 AS builder
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
COPY . .
RUN go test -race ./pkg/loadtester/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*
FROM alpine:3.8
RUN addgroup -S app \
&& adduser -S -g app app \
&& apk --no-cache add ca-certificates curl
WORKDIR /home/app
COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .
RUN chown -R app:app ./
USER app
ENTRYPOINT ["./loadtester"]


@@ -3,6 +3,8 @@ VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | t
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
run:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
-metrics-server=https://prometheus.iowa.weavedx.com \
@@ -29,7 +31,7 @@ test: test-fmt test-codegen
go test ./...
helm-package:
cd charts/ && helm package flagger/ && helm package grafana/
cd charts/ && helm package flagger/ && helm package grafana/ && helm package loadtester/
mv charts/*.tgz docs/
helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
@@ -44,6 +46,7 @@ version-set:
sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
echo "Version $$next set in code, deployment and charts"
version-up:
@@ -77,3 +80,7 @@ reset-test:
kubectl delete -f ./artifacts/namespaces
kubectl apply -f ./artifacts/namespaces
kubectl apply -f ./artifacts/canaries
loadtester-push:
docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)
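
Since `LT_VERSION` is assigned with `?=`, the image tag can be overridden from the command line; a hypothetical invocation (the version value is illustrative):

```bash
# Build and push the load tester image, overriding the version
# otherwise extracted from cmd/loadtester/main.go
make loadtester-push LT_VERSION=0.1.1
```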


@@ -8,8 +8,8 @@
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests, load tests or any other custom
validation.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
### Install
@@ -28,7 +28,7 @@ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Usage
@@ -38,6 +38,9 @@ ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/ser
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.
Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are synchronised.
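
For example, editing one of the tracked objects should start a new canary run; a minimal sketch using the `podinfo-config-env` ConfigMap from the test manifests (the new value is illustrative):

```bash
# Changing a tracked ConfigMap triggers a canary analysis and,
# on promotion, a rolling update of the primary
kubectl -n test patch configmap/podinfo-config-env \
  --type merge -p '{"data":{"color":"green"}}'
```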
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Gated canary promotion stages:
@@ -48,28 +51,28 @@ Gated canary promotion stages:
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* call webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated (revision bump) and start over
* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated (revision bump) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
* wait for the canary deployment to be updated and start over
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -127,12 +130,11 @@ spec:
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
The canary analysis is using the following promql queries:
@@ -211,6 +213,13 @@ kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):
```bash
@@ -239,18 +248,22 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```
Flagger detects that the deployment revision changed and starts a new rollout:
**Note** that Flagger tracks changes in the deployment `PodSpec` but also in `ConfigMaps` and `Secrets`
that are referenced in the pod's volumes and container environment variables.
Flagger detects that the deployment revision changed and starts a new canary analysis:
```
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Canary Weight: 0
Failed Checks: 0
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -272,6 +285,15 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 5 2019-01-16T14:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
@@ -300,9 +322,10 @@ the canary is scaled to zero and the rollout is marked as failed.
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Canary Weight: 0
Failed Checks: 10
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -319,6 +342,8 @@ Events:
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
### Monitoring
Flagger comes with a Grafana dashboard made for canary analysis.


@@ -51,9 +51,8 @@ spec:
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: https://httpbin.org/post
timeout: 1m
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"


@@ -0,0 +1,58 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"


@@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: podinfo-config-env
namespace: test
data:
color: blue
---
apiVersion: v1
kind: ConfigMap
metadata:
name: podinfo-config-vol
namespace: test
data:
output: console
textmode: "true"


@@ -0,0 +1,89 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.3.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
valueFrom:
configMapKeyRef:
name: podinfo-config-env
key: color
- name: SECRET_USER
valueFrom:
secretKeyRef:
name: podinfo-secret-env
key: user
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi
volumeMounts:
- name: configs
mountPath: /etc/podinfo/configs
readOnly: true
- name: secrets
mountPath: /etc/podinfo/secrets
readOnly: true
volumes:
- name: configs
configMap:
name: podinfo-config-vol
- name: secrets
secret:
secretName: podinfo-secret-vol


@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 1
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Secret
metadata:
name: podinfo-secret-env
namespace: test
data:
password: cGFzc3dvcmQ=
user: YWRtaW4=
---
apiVersion: v1
kind: Secret
metadata:
name: podinfo-secret-vol
namespace: test
data:
key: cGFzc3dvcmQ=


@@ -19,7 +19,21 @@ spec:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:


@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.3.0
image: quay.io/stefanprodan/flagger:0.5.0
imagePullPolicy: Always
ports:
- name: http


@@ -0,0 +1,60 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
selector:
matchLabels:
app: flagger-loadtester
template:
metadata:
labels:
app: flagger-loadtester
annotations:
prometheus.io/scrape: "true"
spec:
containers:
- name: loadtester
image: quay.io/stefanprodan/flagger-loadtester:0.1.0
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
command:
- ./loadtester
- -port=8080
- -log-level=info
- -timeout=1h
- -log-cmd-output=true
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
limits:
memory: "512Mi"
cpu: "1000m"
requests:
memory: "32Mi"
cpu: "10m"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
type: ClusterIP
selector:
app: flagger-loadtester
ports:
- name: http
port: 80
protocol: TCP
targetPort: http


@@ -1,8 +1,8 @@
apiVersion: v1
name: flagger
version: 0.3.0
appVersion: 0.3.0
kubeVersion: ">=1.9.0-0"
version: 0.5.0
appVersion: 0.5.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app


@@ -8,7 +8,7 @@ Based on the KPIs analysis a canary is promoted or aborted and the analysis resu
## Prerequisites
* Kubernetes >= 1.9
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6


@@ -20,7 +20,21 @@ spec:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:


@@ -2,7 +2,7 @@
image:
repository: quay.io/stefanprodan/flagger
tag: 0.3.0
tag: 0.5.0
pullPolicy: IfNotPresent
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"


@@ -6,7 +6,7 @@ Grafana dashboards for monitoring progressive deployments powered by Istio, Prom
## Prerequisites
* Kubernetes >= 1.9
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
@@ -75,5 +75,5 @@ helm install flagger/grafana --name flagger-grafana -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)
```


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,20 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service, based on rakyll/hey, generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops
- load testing


@@ -0,0 +1,78 @@
# Flagger load testing service
[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
## Installing the Chart
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-loadtester`:
```console
helm upgrade -i flagger-loadtester flagger/loadtester
```
The command deploys the load tester on the Kubernetes cluster in the default namespace.
> **Tip**: Note that the namespace where you deploy the load tester should have Istio sidecar injection enabled
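
With a default Istio installation, sidecar injection is typically enabled per namespace with a label; a sketch, assuming the `test` namespace:

```bash
# Enable automatic Istio sidecar injection for the test namespace
kubectl label namespace test istio-injection=enabled
```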
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `flagger-loadtester` deployment:
```console
helm delete --purge flagger-loadtester
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the load tester chart and their default values.
Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `{}`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install flagger/loadtester --name flagger-loadtester \
--set cmd.logOutput=false
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)


@@ -0,0 +1 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/


@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ include "loadtester.name" . }}
template:
metadata:
labels:
app: {{ include "loadtester.name" . }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
command:
- ./loadtester
- -port=8080
- -log-level={{ .Values.logLevel }}
- -timeout={{ .Values.cmd.timeout }}
- -log-cmd-output={{ .Values.cmd.logOutput }}
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ include "loadtester.name" . }}


@@ -0,0 +1,29 @@
replicaCount: 1
image:
repository: quay.io/stefanprodan/flagger-loadtester
tag: 0.1.0
pullPolicy: IfNotPresent
logLevel: info
cmd:
logOutput: true
timeout: 1h
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
resources:
requests:
cpu: 10m
memory: 64Mi
nodeSelector: {}
tolerations: []
affinity: {}

cmd/loadtester/main.go

@@ -0,0 +1,44 @@
package main
import (
"flag"
"github.com/knative/pkg/signals"
"github.com/stefanprodan/flagger/pkg/loadtester"
"github.com/stefanprodan/flagger/pkg/logging"
"log"
"time"
)
var VERSION = "0.1.0"
var (
logLevel string
port string
timeout time.Duration
logCmdOutput bool
)
func init() {
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "9090", "Port to listen on.")
flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger(logLevel)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)
go taskRunner.Start(100*time.Millisecond, stopCh)
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
}
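
Given the flags defined above, the service can also be run locally without building the container image; a sketch (the port and timeout values are illustrative):

```bash
# Start the load tester API locally with command output logging enabled
go run ./cmd/loadtester -port=8080 -log-level=info -timeout=1h -log-cmd-output=true
```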


@@ -1 +0,0 @@
flagger.app


@@ -1,11 +0,0 @@
# Flagger
Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic
shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health. Based on the KPIs analysis
a canary is promoted or aborted and the analysis result is published to Slack.
### For the install instructions and usage examples please see [docs.flagger.app](https://docs.flagger.app)


@@ -1,55 +0,0 @@
title: Flagger - Istio Progressive Delivery Kubernetes Operator
remote_theme: errordeveloper/simple-project-homepage
repository: stefanprodan/flagger
by_weaveworks: true
url: "https://flagger.app"
baseurl: "/"
twitter:
username: "stefanprodan"
author:
twitter: "stefanprodan"
# Set default og:image
defaults:
- scope: {path: ""}
values: {image: "diagrams/flagger-overview.png"}
# See: https://material.io/guidelines/style/color.html
# Use color-name-value, like pink-200 or deep-purple-100
brand_color: "amber-400"
# How article URLs are structured.
# See: https://jekyllrb.com/docs/permalinks/
permalink: posts/:title/
# "UA-NNNNNNNN-N"
google_analytics: ""
# Language. For example, if you write in Japanese, use "ja"
lang: "en"
# How many posts are visible on the home page without clicking "View More"
num_posts_visible_initially: 5
# Date format: See http://strftime.net/
date_format: "%b %-d, %Y"
plugins:
- jekyll-feed
- jekyll-readme-index
- jekyll-seo-tag
- jekyll-sitemap
- jemoji
# # required for local builds with starefossen/github-pages
# - jekyll-github-metadata
# - jekyll-mentions
# - jekyll-redirect-from
# - jekyll-remote-theme
exclude:
- CNAME
- gitbook

Binary files not shown.


@@ -4,13 +4,20 @@ description: Flagger is an Istio progressive delivery Kubernetes operator
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pods health. Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events, it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)


@@ -1,6 +1,8 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\) and creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
@@ -95,37 +97,42 @@ the Istio Virtual Service. The container port from the target deployment should
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects (see the example after this list):
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables
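
For instance, a hypothetical Secret update that would start a new analysis (the base64 value is illustrative):

```bash
# Updating a tracked Secret also triggers a canary deployment
kubectl -n test patch secret/podinfo-secret-env \
  --type merge -p '{"data":{"user":"dXNlcjI="}}'
```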
Gated canary promotion stages:
* scan for canary deployments
* creates the primary deployment if needed
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% \(step weight\)
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* call webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated \(revision bump\) and start over
* increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark the canary deployment as finished
* wait for the canary deployment to be updated \(revision bump\) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
* mark rollout as finished
* wait for the canary deployment to be updated and start over
### Canary Analysis
@@ -281,4 +288,78 @@ Response status codes:
On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
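
The JSON body Flagger POSTs to each webhook follows the `CanaryWebhookPayload` type (name, namespace and optional metadata); a sketch that mimics the call by hand, assuming the load tester endpoint from the examples below:

```bash
# Approximate the webhook payload Flagger sends during analysis
curl -s -X POST http://flagger-loadtester.test/ \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"cmd": "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"}}'
```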
### Load Testing
For workloads that are not receiving constant traffic, Flagger can be configured with a webhook
that, when called, will start a load test for the target workload.
If the target workload doesn't receive any traffic during the canary analysis,
Flagger metric checks will fail with "no values found for metric istio_requests_total".
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Or by using Helm:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```
When deployed, the load tester API will be available at `http://flagger-loadtester.test/`.
Now you can add webhooks to the canary analysis spec:
```yaml
webhooks:
- name: load-test-get
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
```
When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.
If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
public URL and use HTTP2.
```yaml
webhooks:
- name: load-test-get
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```
The load tester can run arbitrary commands as long as the binary is present in the container image.
For example, if you want to replace `hey` with another CLI, you can create your own Docker image:
```dockerfile
FROM quay.io/stefanprodan/flagger-loadtester:<VER>
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```


@@ -6,7 +6,7 @@ If you are new to Istio you can follow this GKE guide
**Prerequisites**
* Kubernetes &gt;= 1.9
* Kubernetes &gt;= 1.11
* Istio &gt;= 1.0
* Prometheus &gt;= 2.6


@@ -17,7 +17,14 @@ kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create a canary custom resource \(replace example.com with your own domain\):
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
@@ -70,6 +77,13 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
@@ -99,7 +113,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -108,9 +122,9 @@ Flagger detects that the deployment revision changed and starts a new rollout:
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 19871136
Failed Checks: 0
State: finished
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -132,6 +146,19 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-01-16T14:05:07Z
prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
@@ -156,15 +183,16 @@ Generate latency:
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Revision: 16695041
Failed Checks: 10
State: failed
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
@@ -181,5 +209,3 @@ Events:
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
####

Binary file not shown.


@@ -1,121 +0,0 @@
apiVersion: v1
entries:
flagger:
- apiVersion: v1
appVersion: 0.3.0
created: 2019-01-11T20:08:47.476526+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 8baa478cc802f4e6b7593934483359b8f70ec34413ca3b8de3a692e347a9bda4
engine: gotpl
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.3.0.tgz
version: 0.3.0
- apiVersion: v1
appVersion: 0.2.0
created: 2019-01-11T20:08:47.476127+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 800b5fd1a0b2854ee8412b3170c36ecda3d382f209e18b475ee1d5e3c7fa2f83
engine: gotpl
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.2.0.tgz
version: 0.2.0
- apiVersion: v1
appVersion: 0.1.2
created: 2019-01-11T20:08:47.475257+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 0029ef8dd20ebead3d84638eaa4b44d60b3e2bd953b4b7a1169963ce93a4e87c
engine: gotpl
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
keywords:
- canary
- istio
- gitops
kubeVersion: '>=1.9.0-0'
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: flagger
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.1.2.tgz
version: 0.1.2
- apiVersion: v1
appVersion: 0.1.1
created: 2019-01-11T20:08:47.474547+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 2bb8f72fcf63a5ba5ecbaa2ab0d0446f438ec93fbf3a598cd7de45e64d8f9628
home: https://github.com/stefanprodan/flagger
name: flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.1.1.tgz
version: 0.1.1
- apiVersion: v1
appVersion: 0.1.0
created: 2019-01-11T20:08:47.473757+02:00
description: Flagger is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics
for canary analysis.
digest: 03e05634149e13ddfddae6757266d65c271878a026c21c7d1429c16712bf3845
home: https://github.com/stefanprodan/flagger
name: flagger
urls:
- https://stefanprodan.github.io/flagger/flagger-0.1.0.tgz
version: 0.1.0
grafana:
- apiVersion: v1
appVersion: 5.4.2
created: 2019-01-11T20:08:47.477041+02:00
description: Grafana dashboards for monitoring Flagger canary deployments
digest: 1c929348357ea747405308125d9c7969cf743de5ab9e8adff6fa83943593b2f0
home: https://flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
url: https://github.com/stefanprodan
name: grafana
sources:
- https://github.com/stefanprodan/flagger
urls:
- https://stefanprodan.github.io/flagger/grafana-0.1.0.tgz
version: 0.1.0
generated: 2019-01-11T20:08:47.472865+02:00


@@ -56,7 +56,7 @@ type CanarySpec struct {
CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`
// the maximum time in seconds for a canary deployment to make progress
// before it is considered to be failed. Defaults to 60s.
// before it is considered to be failed. Defaults to ten minutes.
ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
@@ -70,21 +70,32 @@ type CanaryList struct {
Items []Canary `json:"items"`
}
// CanaryState used for status state op
type CanaryState string
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string
const (
CanaryRunning CanaryState = "running"
CanaryFinished CanaryState = "finished"
CanaryFailed CanaryState = "failed"
CanaryInitialized CanaryState = "initialized"
// CanaryInitialized means the primary deployment, hpa and ClusterIP services
// have been created along with the Istio virtual service
CanaryInitialized CanaryPhase = "Initialized"
// CanaryProgressing means the canary analysis is underway
CanaryProgressing CanaryPhase = "Progressing"
// CanarySucceeded means the canary analysis has been successful
// and the canary deployment has been promoted
CanarySucceeded CanaryPhase = "Succeeded"
// CanaryFailed means the canary analysis failed
// and the canary deployment has been scaled to zero
CanaryFailed CanaryPhase = "Failed"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
State CanaryState `json:"state"`
CanaryRevision string `json:"canaryRevision"`
FailedChecks int `json:"failedChecks"`
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
// +optional
TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
// +optional
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
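
Since the status is persisted in the CRD status sub-resource, individual fields can be queried directly; a sketch using the JSON tags above:

```bash
# Read the canary phase and traffic weight from the status sub-resource
kubectl -n test get canary/podinfo \
  -o jsonpath='{.status.phase} {.status.canaryWeight}'
```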
@@ -125,9 +136,9 @@ type CanaryWebhook struct {
// CanaryWebhookPayload holds the deployment info and metadata sent to webhooks
type CanaryWebhookPayload struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
Metadata *map[string]string `json:"metadata,omitempty"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Metadata map[string]string `json:"metadata,omitempty"`
}
// GetProgressDeadlineSeconds returns the progress deadline (default 600s)
@@ -139,6 +150,7 @@ func (c *Canary) GetProgressDeadlineSeconds() int {
return ProgressDeadlineSeconds
}
// GetAnalysisInterval returns the canary analysis interval (default 60s)
func (c *Canary) GetAnalysisInterval() time.Duration {
if c.Spec.CanaryAnalysis.Interval == "" {
return AnalysisInterval


@@ -188,6 +188,17 @@ func (in *CanarySpec) DeepCopy() *CanarySpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryStatus) DeepCopyInto(out *CanaryStatus) {
*out = *in
if in.TrackedConfigs != nil {
in, out := &in.TrackedConfigs, &out.TrackedConfigs
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
@@ -234,13 +245,9 @@ func (in *CanaryWebhookPayload) DeepCopyInto(out *CanaryWebhookPayload) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return


@@ -74,6 +74,11 @@ func NewController(
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
configTracker: ConfigTracker{
logger: logger,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
},
}
router := CanaryRouter{
@@ -248,17 +253,17 @@ func checkCustomResourceType(obj interface{}, logger *zap.SugaredLogger) (flagge
}
func (c *Controller) recordEventInfof(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Infof(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeNormal, "Synced", fmt.Sprintf(template, args...))
}
func (c *Controller) recordEventErrorf(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Errorf(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Errorf(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}
func (c *Controller) recordEventWarningf(r *flaggerv1.Canary, template string, args ...interface{}) {
c.logger.Infof(template, args...)
c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}


@@ -0,0 +1,523 @@
package controller
import (
istioclientset "github.com/knative/pkg/client/clientset/versioned"
fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/logging"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v1"
hpav2 "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"sync"
"time"
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
)
type Mocks struct {
canary *v1alpha3.Canary
kubeClient kubernetes.Interface
istioClient istioclientset.Interface
flaggerClient clientset.Interface
deployer CanaryDeployer
router CanaryRouter
observer CanaryObserver
ctrl *Controller
logger *zap.SugaredLogger
}
func SetupMocks() Mocks {
// init canary
canary := newTestCanary()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
// init kube clientset and register mock objects
kubeClient := fake.NewSimpleClientset(
newTestDeployment(),
newTestHPA(),
NewTestConfigMap(),
NewTestConfigMapEnv(),
NewTestConfigMapVol(),
NewTestSecret(),
NewTestSecretEnv(),
NewTestSecretVol(),
)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
// init controller helpers
deployer := CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
configTracker: ConfigTracker{
logger: logger,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
},
}
router := CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
observer := CanaryObserver{
metricsServer: "fake",
}
// init controller
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
router: router,
observer: observer,
recorder: NewCanaryRecorder(false),
}
ctrl.flaggerSynced = alwaysReady
return Mocks{
canary: canary,
observer: observer,
router: router,
deployer: deployer,
logger: logger,
flaggerClient: flaggerClient,
istioClient: istioClient,
kubeClient: kubeClient,
ctrl: ctrl,
}
}
func NewTestConfigMap() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-config-env",
},
Data: map[string]string{
"color": "red",
},
}
}
func NewTestConfigMapV2() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-config-env",
},
Data: map[string]string{
"color": "blue",
"output": "console",
},
}
}
func NewTestConfigMapEnv() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-config-all-env",
},
Data: map[string]string{
"color": "red",
},
}
}
func NewTestConfigMapVol() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-config-vol",
},
Data: map[string]string{
"color": "red",
},
}
}
func NewTestSecret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-secret-env",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"apiKey": []byte("test"),
},
}
}
func NewTestSecretV2() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-secret-env",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"apiKey": []byte("test2"),
},
}
}
func NewTestSecretEnv() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-secret-all-env",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"apiKey": []byte("test"),
},
}
}
func NewTestSecretVol() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo-secret-vol",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"apiKey": []byte("test"),
},
}
}
func newTestCanary() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha3.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: &hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha3.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha3.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
Interval: "1m",
},
{
Name: "istio_request_duration_seconds_bucket",
Threshold: 500,
Interval: "1m",
},
},
},
},
}
return cd
}
func newTestDeployment() *appsv1.Deployment {
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "podinfo",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "podinfo",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "podinfo",
Image: "quay.io/stefanprodan/podinfo:1.2.0",
Command: []string{
"./podinfo",
"--port=9898",
},
Args: nil,
WorkingDir: "",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 9898,
Protocol: corev1.ProtocolTCP,
},
},
Env: []corev1.EnvVar{
{
Name: "PODINFO_UI_COLOR",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-env",
},
Key: "color",
},
},
},
{
Name: "API_KEY",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-secret-env",
},
Key: "apiKey",
},
},
},
},
EnvFrom: []corev1.EnvFromSource{
{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-all-env",
},
},
},
{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-secret-all-env",
},
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "config",
MountPath: "/etc/podinfo/config",
ReadOnly: true,
},
{
Name: "secret",
MountPath: "/etc/podinfo/secret",
ReadOnly: true,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-vol",
},
},
},
},
{
Name: "secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "podinfo-secret-vol",
},
},
},
},
},
},
},
}
return d
}
func newTestDeploymentV2() *appsv1.Deployment {
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "podinfo",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "podinfo",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "podinfo",
Image: "quay.io/stefanprodan/podinfo:1.2.1",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 9898,
Protocol: corev1.ProtocolTCP,
},
},
Command: []string{
"./podinfo",
"--port=9898",
},
Env: []corev1.EnvVar{
{
Name: "PODINFO_UI_COLOR",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-env",
},
Key: "color",
},
},
},
{
Name: "API_KEY",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-secret-env",
},
Key: "apiKey",
},
},
},
},
EnvFrom: []corev1.EnvFromSource{
{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-all-env",
},
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "config",
MountPath: "/etc/podinfo/config",
ReadOnly: true,
},
{
Name: "secret",
MountPath: "/etc/podinfo/secret",
ReadOnly: true,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "podinfo-config-vol",
},
},
},
},
{
Name: "secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "podinfo-secret-vol",
},
},
},
},
},
},
},
}
return d
}
func newTestHPA() *hpav2.HorizontalPodAutoscaler {
h := &hpav2.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: hpav2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: hpav2.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
Kind: "Deployment",
},
Metrics: []hpav2.MetricSpec{
{
Type: "Resource",
Resource: &hpav2.ResourceMetricSource{
Name: "cpu",
TargetAverageUtilization: int32p(99),
},
},
},
},
}
return h
}
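SetupMocks seeds every fixture into fake clientsets, so the tests in this changeset run without an API server. A hedged sketch of the underlying mechanism, using the pre-context-argument client-go signatures that this code targets:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// objects passed to NewSimpleClientset are served from an in-memory tracker
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "podinfo-config-env"},
		Data:       map[string]string{"color": "red"},
	}
	client := fake.NewSimpleClientset(cm)
	got, err := client.CoreV1().ConfigMaps("default").Get("podinfo-config-env", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Data["color"]) // prints: red
}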

View File

@@ -1,9 +1,11 @@
package controller
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"time"
"github.com/google/go-cmp/cmp"
@@ -28,19 +30,22 @@ type CanaryDeployer struct {
istioClient istioclientset.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker ConfigTracker
}
// Promote copies the pod spec from canary to primary
// Promote copies the pod spec, secrets and config maps from canary to primary
func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
@@ -49,15 +54,35 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
}
primary.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primary.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primary.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primary.Spec.Strategy = canary.Spec.Strategy
primary.Spec.Template.Spec = canary.Spec.Template.Spec
_, err = c.kubeClient.AppsV1().Deployments(primary.Namespace).Update(primary)
// promote secrets and config maps
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return fmt.Errorf("updating template spec %s.%s failed: %v", primary.GetName(), primary.Namespace, err)
return err
}
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
primaryCopy := primary.DeepCopy()
primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
primaryCopy.Spec.Strategy = canary.Spec.Strategy
// update spec with primary secrets and config maps
primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
// update pod annotations to ensure a rolling update
annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
if err != nil {
return err
}
primaryCopy.Spec.Template.Annotations = annotations
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
primaryCopy.GetName(), primaryCopy.Namespace, err)
}
return nil
@@ -78,15 +103,11 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
} else {
return retriable, err
}
return retriable, fmt.Errorf("Halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error())
}
if primary.Spec.Replicas != nil && *primary.Spec.Replicas == 0 {
return true, fmt.Errorf("halt %s.%s advancement primary deployment is scaled to zero",
return true, fmt.Errorf("Halt %s.%s advancement primary deployment is scaled to zero",
cd.Name, cd.Namespace)
}
return true, nil
@@ -96,18 +117,19 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
// the deployment is in the middle of a rolling update or if the pods are unhealthy
// it will return a non-retriable error if the rolling update is stuck
func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return true, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return true, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
if err != nil {
if retriable {
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
return retriable, fmt.Errorf("Halt advancement %s.%s %s", targetName, cd.Namespace, err.Error())
} else {
return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
cd.GetProgressDeadlineSeconds())
@@ -119,22 +141,23 @@ func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
// IsNewSpec returns true if the canary deployment pod spec has changed
func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return false, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
if cd.Status.CanaryRevision == "" {
if cd.Status.LastAppliedSpec == "" {
return true, nil
}
newSpec := &canary.Spec.Template.Spec
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.CanaryRevision)
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
if err != nil {
return false, err
return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
}
oldSpec := &corev1.PodSpec{}
err = json.Unmarshal(oldSpecJson, oldSpec)
@@ -150,31 +173,75 @@ func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
return false, nil
}
// SetFailedChecks updates the canary failed checks counter
func (c *CanaryDeployer) SetFailedChecks(cd *flaggerv1.Canary, val int) error {
cd.Status.FailedChecks = val
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Update(cd)
// ShouldAdvance determines if the canary analysis can proceed
func (c *CanaryDeployer) ShouldAdvance(cd *flaggerv1.Canary) (bool, error) {
if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
return true, nil
}
newDep, err := c.IsNewSpec(cd)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return false, err
}
if newDep {
return newDep, nil
}
newCfg, err := c.configTracker.HasConfigChanged(cd)
if err != nil {
return false, err
}
return newCfg, nil
}
// SetStatusFailedChecks updates the canary failed checks counter
func (c *CanaryDeployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.FailedChecks = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SetState updates the canary status state
func (c *CanaryDeployer) SetState(cd *flaggerv1.Canary, state flaggerv1.CanaryState) error {
cd.Status.State = state
cd.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Update(cd)
// SetStatusWeight updates the canary status weight value
func (c *CanaryDeployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.CanaryWeight = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SetStatusPhase updates the canary status phase
func (c *CanaryDeployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = phase
cdCopy.Status.LastTransitionTime = metav1.Now()
if phase != flaggerv1.CanaryProgressing {
cdCopy.Status.CanaryWeight = 0
}
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// SyncStatus encodes the canary pod spec and updates the canary status
func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
@@ -182,36 +249,48 @@ func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.Canar
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
specJson, err := json.Marshal(canary.Spec.Template.Spec)
specJson, err := json.Marshal(dep.Spec.Template.Spec)
if err != nil {
return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
specEnc := base64.StdEncoding.EncodeToString(specJson)
cd.Status.State = status.State
cd.Status.FailedChecks = status.FailedChecks
cd.Status.CanaryRevision = specEnc
cd.Status.LastTransitionTime = metav1.Now()
_, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Update(cd)
configs, err := c.configTracker.GetConfigRefs(cd)
if err != nil {
return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("configs query error %v", err)
}
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = status.Phase
cdCopy.Status.CanaryWeight = status.CanaryWeight
cdCopy.Status.FailedChecks = status.FailedChecks
cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
cdCopy.Status.LastTransitionTime = metav1.Now()
cdCopy.Status.TrackedConfigs = configs
cd, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
}
return nil
}
// Scale sets the canary deployment replicas
func (c *CanaryDeployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
canary.Spec.Replicas = int32p(replicas)
canary, err = c.kubeClient.AppsV1().Deployments(canary.Namespace).Update(canary)
depCopy := dep.DeepCopy()
depCopy.Spec.Replicas = int32p(replicas)
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
if err != nil {
return fmt.Errorf("scaling %s.%s to %v failed: %v", canary.GetName(), canary.Namespace, replicas, err)
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
}
return nil
}
@@ -224,8 +303,8 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
}
if cd.Status.State == "" {
c.logger.Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if cd.Status.Phase == "" {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
if err := c.Scale(cd, 0); err != nil {
return err
}
@@ -240,24 +319,38 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
}
func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
canaryName := cd.Spec.TargetRef.Name
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(canaryName, metav1.GetOptions{})
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found, retrying", canaryName, cd.Namespace)
return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
}
return err
}
primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
if errors.IsNotFound(err) {
// create primary secrets and config maps
configRefs, err := c.configTracker.GetTargetConfigs(cd)
if err != nil {
return err
}
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
return err
}
annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
if err != nil {
return err
}
// create primary deployment
primaryDep = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Annotations: canaryDep.Annotations,
Namespace: cd.Namespace,
Name: primaryName,
Labels: canaryDep.Labels,
Namespace: cd.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
@@ -280,9 +373,10 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": primaryName},
Annotations: canaryDep.Spec.Template.Annotations,
Annotations: annotations,
},
Spec: canaryDep.Spec.Template.Spec,
// update spec with the primary secrets and config maps
Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
},
},
}
@@ -292,7 +386,7 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
return err
}
c.logger.Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
}
return nil
@@ -316,6 +410,7 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
ObjectMeta: metav1.ObjectMeta{
Name: primaryHpaName,
Namespace: cd.Namespace,
Labels: hpa.Labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
@@ -340,7 +435,7 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
}
return nil
@@ -395,3 +490,26 @@ func (c *CanaryDeployer) getDeploymentCondition(
}
return nil
}
// makeAnnotations appends a unique ID to the annotations map
func (c *CanaryDeployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
idKey := "flagger-id"
res := make(map[string]string)
uuid := make([]byte, 16)
n, err := io.ReadFull(rand.Reader, uuid)
if n != len(uuid) || err != nil {
return res, err
}
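// stamp the RFC 4122 variant and version 4 bits so the random bytes format as a UUID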
uuid[8] = uuid[8]&^0xc0 | 0x80
uuid[6] = uuid[6]&^0xf0 | 0x40
id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
for k, v := range annotations {
if k != idKey {
res[k] = v
}
}
res[idKey] = id
return res, nil
}
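A standalone copy of the helper for inspection: it keeps every existing annotation except flagger-id, then stamps a fresh random ID under that key, so the pod template changes and the Deployment performs a rolling update:

package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

// keep every key except flagger-id, then stamp a fresh random v4-style ID
func makeAnnotations(annotations map[string]string) (map[string]string, error) {
	const idKey = "flagger-id"
	res := make(map[string]string)
	uuid := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, uuid); err != nil {
		return res, err
	}
	uuid[8] = uuid[8]&^0xc0 | 0x80
	uuid[6] = uuid[6]&^0xf0 | 0x40
	for k, v := range annotations {
		if k != idKey {
			res[k] = v
		}
	}
	res[idKey] = fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
	return res, nil
}

func main() {
	a, _ := makeAnnotations(map[string]string{"prometheus.io/scrape": "true", "flagger-id": "stale"})
	fmt.Println(a) // stale ID replaced, other annotations preserved
}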

View File

@@ -4,200 +4,24 @@ import (
"testing"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/logging"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v1"
hpav2 "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
func newTestCanary() *v1alpha3.Canary {
cd := &v1alpha3.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: v1alpha3.CanarySpec{
TargetRef: hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: &hpav1.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "autoscaling/v2beta1",
Kind: "HorizontalPodAutoscaler",
}, Service: v1alpha3.CanaryService{
Port: 9898,
}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
Threshold: 10,
StepWeight: 10,
MaxWeight: 50,
Metrics: []v1alpha3.CanaryMetric{
{
Name: "istio_requests_total",
Threshold: 99,
Interval: "1m",
},
{
Name: "istio_request_duration_seconds_bucket",
Threshold: 500,
Interval: "1m",
},
},
},
},
}
return cd
}
func newTestDeployment() *appsv1.Deployment {
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "podinfo",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "podinfo",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "podinfo",
Image: "quay.io/stefanprodan/podinfo:1.2.0",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 9898,
Protocol: corev1.ProtocolTCP,
},
},
Command: []string{
"./podinfo",
"--port=9898",
},
},
},
},
},
},
}
return d
}
func newTestDeploymentUpdated() *appsv1.Deployment {
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "podinfo",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "podinfo",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "podinfo",
Image: "quay.io/stefanprodan/podinfo:1.2.1",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 9898,
Protocol: corev1.ProtocolTCP,
},
},
Command: []string{
"./podinfo",
"--port=9898",
},
},
},
},
},
},
}
return d
}
func newTestHPA() *hpav2.HorizontalPodAutoscaler {
h := &hpav2.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
},
Spec: hpav2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: hpav2.CrossVersionObjectReference{
Name: "podinfo",
APIVersion: "apps/v1",
Kind: "Deployment",
},
Metrics: []hpav2.MetricSpec{
{
Type: "Resource",
Resource: &hpav2.ResourceMetricSource{
Name: "cpu",
TargetAverageUtilization: int32p(99),
},
},
},
},
}
return h
}
func TestCanaryDeployer_Sync(t *testing.T) {
canary := newTestCanary()
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
if err != nil {
t.Fatal(err.Error())
}
depPrimary, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
configMap := NewTestConfigMap()
secret := NewTestSecret()
primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image
sourceImage := dep.Spec.Template.Spec.Containers[0].Image
@@ -205,7 +29,7 @@ func TestCanaryDeployer_Sync(t *testing.T) {
t.Errorf("Got image %s wanted %s", primaryImage, sourceImage)
}
hpaPrimary, err := kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get("podinfo-primary", metav1.GetOptions{})
hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -213,37 +37,76 @@ func TestCanaryDeployer_Sync(t *testing.T) {
if hpaPrimary.Spec.ScaleTargetRef.Name != depPrimary.Name {
t.Errorf("Got HPA target %s wanted %s", hpaPrimary.Spec.ScaleTargetRef.Name, depPrimary.Name)
}
configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if configPrimary.Data["color"] != configMap.Data["color"] {
t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"])
}
configPrimaryEnv, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-all-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if configPrimaryEnv.Data["color"] != configMap.Data["color"] {
t.Errorf("Got ConfigMap %s wanted %s", configPrimaryEnv.Data["a"], configMap.Data["color"])
}
configPrimaryVol, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-vol-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if configPrimaryVol.Data["color"] != configMap.Data["color"] {
t.Errorf("Got ConfigMap color %s wanted %s", configPrimary.Data["color"], configMap.Data["color"])
}
secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if string(secretPrimary.Data["apiKey"]) != string(secret.Data["apiKey"]) {
t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"])
}
secretPrimaryEnv, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-all-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if string(secretPrimaryEnv.Data["apiKey"]) != string(secret.Data["apiKey"]) {
t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"])
}
secretPrimaryVol, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-vol-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if string(secretPrimaryVol.Data["apiKey"]) != string(secret.Data["apiKey"]) {
t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret.Data["apiKey"])
}
}
func TestCanaryDeployer_IsNewSpec(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
dep2 := newTestDeploymentUpdated()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
dep2 := newTestDeploymentV2()
_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
isNew, err := deployer.IsNewSpec(canary)
isNew, err := mocks.deployer.IsNewSpec(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -254,39 +117,30 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
}
func TestCanaryDeployer_Promote(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
dep2 := newTestDeploymentUpdated()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
_, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
dep2 := newTestDeploymentV2()
_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
err = deployer.Promote(canary)
config2 := NewTestConfigMapV2()
_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2)
if err != nil {
t.Fatal(err.Error())
}
depPrimary, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
err = mocks.deployer.Promote(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -296,67 +150,48 @@ func TestCanaryDeployer_Promote(t *testing.T) {
if primaryImage != sourceImage {
t.Errorf("Got image %s wanted %s", primaryImage, sourceImage)
}
configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if configPrimary.Data["color"] != config2.Data["color"] {
t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"])
}
}
func TestCanaryDeployer_IsReady(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
_, err = deployer.IsPrimaryReady(canary)
_, err = mocks.deployer.IsPrimaryReady(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
_, err = deployer.IsCanaryReady(canary)
_, err = mocks.deployer.IsCanaryReady(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
}
func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
err = deployer.SetFailedChecks(canary, 1)
err = mocks.deployer.SetStatusFailedChecks(mocks.canary, 1)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -367,109 +202,76 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
}
func TestCanaryDeployer_SetState(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
err = deployer.SetState(canary, v1alpha3.CanaryRunning)
err = mocks.deployer.SetStatusPhase(mocks.canary, v1alpha3.CanaryProgressing)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if res.Status.State != v1alpha3.CanaryRunning {
t.Errorf("Got %v wanted %v", res.Status.State, v1alpha3.CanaryRunning)
if res.Status.Phase != v1alpha3.CanaryProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryProgressing)
}
}
func TestCanaryDeployer_SyncStatus(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
status := v1alpha3.CanaryStatus{
State: v1alpha3.CanaryRunning,
Phase: v1alpha3.CanaryProgressing,
FailedChecks: 2,
}
err = deployer.SyncStatus(canary, status)
err = mocks.deployer.SyncStatus(mocks.canary, status)
if err != nil {
t.Fatal(err.Error())
}
res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
res, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if res.Status.State != status.State {
t.Errorf("Got state %v wanted %v", res.Status.State, status.State)
if res.Status.Phase != status.Phase {
t.Errorf("Got state %v wanted %v", res.Status.Phase, status.Phase)
}
if res.Status.FailedChecks != status.FailedChecks {
t.Errorf("Got failed checks %v wanted %v", res.Status.FailedChecks, status.FailedChecks)
}
if res.Status.TrackedConfigs == nil {
t.Fatalf("Status tracking configs are empty")
}
configs := *res.Status.TrackedConfigs
secret := NewTestSecret()
if _, exists := configs["secret/"+secret.GetName()]; !exists {
t.Errorf("Secret %s not found in status", secret.GetName())
}
}
func TestCanaryDeployer_Scale(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
logger, _ := logging.NewLogger("debug")
deployer := &CanaryDeployer{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
logger: logger,
}
err := deployer.Sync(canary)
mocks := SetupMocks()
err := mocks.deployer.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
err = deployer.Scale(canary, 2)
err = mocks.deployer.Scale(mocks.canary, 2)
c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
c, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

View File

@@ -6,7 +6,8 @@ import "time"
type CanaryJob struct {
Name string
Namespace string
function func(name string, namespace string)
SkipTests bool
function func(name string, namespace string, skipTests bool)
done chan bool
ticker *time.Ticker
}
@@ -15,11 +16,11 @@ type CanaryJob struct {
func (j CanaryJob) Start() {
go func() {
// run the infra bootstrap on job creation
j.function(j.Name, j.Namespace)
j.function(j.Name, j.Namespace, j.SkipTests)
for {
select {
case <-j.ticker.C:
j.function(j.Name, j.Namespace)
j.function(j.Name, j.Namespace, j.SkipTests)
case <-j.done:
return
}
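A stripped-down stand-in for the job loop above: the function fires once at start, then on every tick until the done channel is signalled:

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	done := make(chan bool)
	advance := func(name, namespace string, skipTests bool) {
		fmt.Printf("advance %s.%s skipTests=%v\n", name, namespace, skipTests)
	}
	go func() {
		advance("podinfo", "default", false) // bootstrap run on job creation
		for {
			select {
			case <-ticker.C:
				advance("podinfo", "default", false)
			case <-done:
				return
			}
		}
	}()
	time.Sleep(350 * time.Millisecond) // let a few ticks fire
	ticker.Stop()
	done <- true
}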

View File

@@ -72,8 +72,8 @@ func (cr *CanaryRecorder) SetTotal(namespace string, total int) {
// SetStatus sets the last known canary analysis status
func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary) {
status := 1
switch cd.Status.State {
case flaggerv1.CanaryRunning:
switch cd.Status.Phase {
case flaggerv1.CanaryProgressing:
status = 0
case flaggerv1.CanaryFailed:
status = 2
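The gauge keeps its previous numeric encoding; only the status field and its constants are renamed. A sketch of the mapping, with strings standing in for the flaggerv1 phase constants:

package main

import "fmt"

// mirrors SetStatus: 0 = progressing, 2 = failed, 1 = any other phase
func phaseToGauge(phase string) int {
	switch phase {
	case "Progressing":
		return 0
	case "Failed":
		return 2
	}
	return 1
}

func main() {
	for _, p := range []string{"Progressing", "Succeeded", "Failed"} {
		fmt.Println(p, "->", phaseToGauge(p))
	}
}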

View File

@@ -42,13 +42,13 @@ func (c *CanaryRouter) Sync(cd *flaggerv1.Canary) error {
}
func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
canaryName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", canaryName)
canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(canaryName, metav1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
if errors.IsNotFound(err) {
canaryService = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: canaryName,
Name: targetName,
Namespace: cd.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -60,7 +60,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{"app": canaryName},
Selector: map[string]string{"app": targetName},
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -79,7 +79,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
}
canaryTestServiceName := fmt.Sprintf("%s-canary", cd.Spec.TargetRef.Name)
@@ -99,7 +99,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Selector: map[string]string{"app": canaryName},
Selector: map[string]string{"app": targetName},
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -118,7 +118,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
if err != nil {
return err
}
c.logger.Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
}
primaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(primaryName, metav1.GetOptions{})
@@ -157,23 +157,23 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
return err
}
c.logger.Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
}
return nil
}
func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
canaryName := cd.Name
primaryName := fmt.Sprintf("%s-primary", canaryName)
hosts := append(cd.Spec.Service.Hosts, canaryName)
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", targetName)
hosts := append(cd.Spec.Service.Hosts, targetName)
gateways := append(cd.Spec.Service.Gateways, "mesh")
virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(canaryName, metav1.GetOptions{})
virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, metav1.GetOptions{})
if errors.IsNotFound(err) {
c.logger.Debugf("VirtualService %s.%s not found", canaryName, cd.Namespace)
c.logger.Debugf("VirtualService %s.%s not found", targetName, cd.Namespace)
virtualService = &istiov1alpha3.VirtualService{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Name: targetName,
Namespace: cd.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -200,7 +200,7 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
},
{
Destination: istiov1alpha3.Destination{
Host: canaryName,
Host: targetName,
Port: istiov1alpha3.PortSelector{
Number: uint32(cd.Spec.Service.Port),
},
@@ -216,9 +216,9 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
c.logger.Debugf("Creating VirtualService %s.%s", virtualService.GetName(), cd.Namespace)
_, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Create(virtualService)
if err != nil {
return fmt.Errorf("VirtualService %s.%s create error %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s create error %v", targetName, cd.Namespace, err)
}
c.logger.Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
}
return nil
@@ -230,23 +230,24 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
canary istiov1alpha3.DestinationWeight,
err error,
) {
targetName := cd.Spec.TargetRef.Name
vs := &istiov1alpha3.VirtualService{}
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
err = fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
err = fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
return
}
err = fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
err = fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
return
}
for _, http := range vs.Spec.Http {
for _, route := range http.Route {
if route.Destination.Host == fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) {
if route.Destination.Host == fmt.Sprintf("%s-primary", targetName) {
primary = route
}
if route.Destination.Host == cd.Spec.TargetRef.Name {
if route.Destination.Host == targetName {
canary = route
}
}
@@ -254,7 +255,7 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
if primary.Weight == 0 && canary.Weight == 0 {
err = fmt.Errorf("VirtualService %s.%s does not contain routes for %s and %s",
cd.Name, cd.Namespace, fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name), cd.Spec.TargetRef.Name)
targetName, cd.Namespace, fmt.Sprintf("%s-primary", targetName), targetName)
}
return
@@ -266,23 +267,26 @@ func (c *CanaryRouter) SetRoutes(
primary istiov1alpha3.DestinationWeight,
canary istiov1alpha3.DestinationWeight,
) error {
vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
targetName := cd.Spec.TargetRef.Name
vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
return fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
}
return fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
}
vs.Spec.Http = []istiov1alpha3.HTTPRoute{
vsCopy := vs.DeepCopy()
vsCopy.Spec.Http = []istiov1alpha3.HTTPRoute{
{
Route: []istiov1alpha3.DestinationWeight{primary, canary},
},
}
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vs)
vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vsCopy)
if err != nil {
return fmt.Errorf("VirtualService %s.%s update failed: %v", cd.Name, cd.Namespace, err)
return fmt.Errorf("VirtualService %s.%s update failed: %v", targetName, cd.Namespace, err)
}
return nil
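SetRoutes replaces the VirtualService's HTTP routes with the two destination weights computed by the scheduler. A hedged sketch of that weight arithmetic, using the StepWeight and MaxWeight values from the test canary above:

package main

import "fmt"

func main() {
	// shift stepWeight percent from primary to canary per interval,
	// stopping once the canary reaches maxWeight
	stepWeight, maxWeight := 10, 50
	primary, canary := 100, 0
	for canary < maxWeight {
		canary += stepWeight
		primary -= stepWeight
		fmt.Printf("SetRoutes(primary=%d, canary=%d)\n", primary, canary)
	}
}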

View File

@@ -5,37 +5,17 @@ import (
"testing"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/logging"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
func TestCanaryRouter_Sync(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
router := &CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
err := router.Sync(canary)
mocks := SetupMocks()
err := mocks.router.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
canarySvc, err := kubeClient.CoreV1().Services("default").Get("podinfo", metav1.GetOptions{})
canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -48,7 +28,7 @@ func TestCanaryRouter_Sync(t *testing.T) {
t.Errorf("Got svc port %v wanted %v", canarySvc.Spec.Ports[0].Port, 9898)
}
primarySvc, err := kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
primarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -61,7 +41,7 @@ func TestCanaryRouter_Sync(t *testing.T) {
t.Errorf("Got primary svc port %v wanted %v", primarySvc.Spec.Ports[0].Port, 9898)
}
vs, err := istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
vs, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -76,29 +56,13 @@ func TestCanaryRouter_Sync(t *testing.T) {
}
func TestCanaryRouter_GetRoutes(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
router := &CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
err := router.Sync(canary)
mocks := SetupMocks()
err := mocks.router.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
p, c, err := router.GetRoutes(canary)
p, c, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -113,29 +77,13 @@ func TestCanaryRouter_GetRoutes(t *testing.T) {
}
func TestCanaryRouter_SetRoutes(t *testing.T) {
canary := newTestCanary()
dep := newTestDeployment()
hpa := newTestHPA()
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
kubeClient := fake.NewSimpleClientset(dep, hpa)
istioClient := fakeIstio.NewSimpleClientset()
logger, _ := logging.NewLogger("debug")
router := &CanaryRouter{
flaggerClient: flaggerClient,
kubeClient: kubeClient,
istioClient: istioClient,
logger: logger,
}
err := router.Sync(canary)
mocks := SetupMocks()
err := mocks.router.Sync(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
p, c, err := router.GetRoutes(canary)
p, c, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
@@ -143,12 +91,12 @@ func TestCanaryRouter_SetRoutes(t *testing.T) {
p.Weight = 50
c.Weight = 50
err = router.SetRoutes(canary, p, c)
err = mocks.router.SetRoutes(mocks.canary, p, c)
if err != nil {
t.Fatal(err.Error())
}
vs, err := istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
vs, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -158,10 +106,10 @@ func TestCanaryRouter_SetRoutes(t *testing.T) {
for _, http := range vs.Spec.Http {
for _, route := range http.Route {
if route.Destination.Host == fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name) {
if route.Destination.Host == fmt.Sprintf("%s-primary", mocks.canary.Spec.TargetRef.Name) {
pRoute = route
}
if route.Destination.Host == canary.Spec.TargetRef.Name {
if route.Destination.Host == mocks.canary.Spec.TargetRef.Name {
cRoute = route
}
}

View File

@@ -2,6 +2,7 @@ package controller
import (
"fmt"
"strings"
"time"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
@@ -12,7 +13,7 @@ import (
// new jobs are created and started for new canaries
// jobs for removed canaries are stopped and deleted
func (c *Controller) scheduleCanaries() {
current := make(map[string]bool)
current := make(map[string]string)
stats := make(map[string]int)
c.canaries.Range(func(key interface{}, value interface{}) bool {
@@ -20,7 +21,7 @@ func (c *Controller) scheduleCanaries() {
// format: <name>.<namespace>
name := key.(string)
current[name] = true
current[name] = fmt.Sprintf("%s.%s", canary.Spec.TargetRef.Name, canary.Namespace)
// schedule new jobs
if _, exists := c.jobs[name]; !exists {
@@ -54,18 +55,27 @@ func (c *Controller) scheduleCanaries() {
}
}
// check if multiple canaries have the same target
for canaryName, targetName := range current {
for name, target := range current {
if name != canaryName && target == targetName {
c.logger.With("canary", canaryName).Errorf("Bad things will happen! Found more than one canary with the same target %s", targetName)
}
}
}
// set total canaries per namespace metric
for k, v := range stats {
c.recorder.SetTotal(k, v)
}
}
func (c *Controller) advanceCanary(name string, namespace string) {
func (c *Controller) advanceCanary(name string, namespace string, skipLivenessChecks bool) {
begin := time.Now()
// check if the canary exists
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(namespace).Get(name, v1.GetOptions{})
if err != nil {
c.logger.Errorf("Canary %s.%s not found", name, namespace)
c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).Errorf("Canary %s.%s not found", name, namespace)
return
}
@@ -81,6 +91,16 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
shouldAdvance, err := c.deployer.ShouldAdvance(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
if !shouldAdvance {
return
}
// set max weight default value to 100%
maxWeight := 100
if cd.Spec.CanaryAnalysis.MaxWeight > 0 {
@@ -88,9 +108,11 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// check primary deployment status
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
if !skipLivenessChecks {
if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
}
// check if virtual service exists
@@ -104,7 +126,33 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
// check if canary analysis should start (canary revision has changes) or continue
if ok := c.checkCanaryStatus(cd, c.deployer); !ok {
if ok := c.checkCanaryStatus(cd, shouldAdvance); !ok {
return
}
+ // check if canary revision changed during analysis
+ if restart := c.hasCanaryRevisionChanged(cd); restart {
+ c.recordEventInfof(cd, "New revision detected! Restarting analysis for %s.%s",
+ cd.Spec.TargetRef.Name, cd.Namespace)
+ // route all traffic back to primary
+ primaryRoute.Weight = 100
+ canaryRoute.Weight = 0
+ if err := c.router.SetRoutes(cd, primaryRoute, canaryRoute); err != nil {
+ c.recordEventWarningf(cd, "%v", err)
+ return
+ }
+ // reset status
+ status := flaggerv1.CanaryStatus{
+ Phase: flaggerv1.CanaryProgressing,
+ CanaryWeight: 0,
+ FailedChecks: 0,
+ }
+ if err := c.deployer.SyncStatus(cd, status); err != nil {
+ c.recordEventWarningf(cd, "%v", err)
+ return
+ }
+ return
+ }
@@ -113,14 +161,17 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}()
// check canary deployment status
- retriable, err := c.deployer.IsCanaryReady(cd)
- if err != nil && retriable {
- c.recordEventWarningf(cd, "%v", err)
- return
+ var retriable = true
+ if !skipLivenessChecks {
+ retriable, err = c.deployer.IsCanaryReady(cd)
+ if err != nil && retriable {
+ c.recordEventWarningf(cd, "%v", err)
+ return
+ }
}
// check if the number of failed checks reached the threshold
- if cd.Status.State == flaggerv1.CanaryRunning &&
+ if cd.Status.Phase == flaggerv1.CanaryProgressing &&
(!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {
if cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
@@ -156,8 +207,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// mark canary as failed
- if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryFailed}); err != nil {
- c.logger.Errorf("%v", err)
+ if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryFailed, CanaryWeight: 0}); err != nil {
+ c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return
}
@@ -168,10 +219,10 @@ func (c *Controller) advanceCanary(name string, namespace string) {
// check if the canary success rate is above the threshold
// skip check if no traffic is routed to canary
if canaryRoute.Weight == 0 {
c.recordEventInfof(cd, "Starting canary deployment for %s.%s", cd.Name, cd.Namespace)
c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
} else {
if ok := c.analyseCanary(cd); !ok {
- if err := c.deployer.SetFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
+ if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -195,6 +246,12 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
+ // update weight status
+ if err := c.deployer.SetStatusWeight(cd, canaryRoute.Weight); err != nil {
+ c.recordEventWarningf(cd, "%v", err)
+ return
+ }
c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryRoute.Weight)
@@ -226,8 +283,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
return
}
- // update status
- if err := c.deployer.SetState(cd, flaggerv1.CanaryFinished); err != nil {
+ // update status phase
+ if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@@ -237,15 +294,15 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
}
- func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDeployer) bool {
+ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
c.recorder.SetStatus(cd)
if cd.Status.State == "running" {
if cd.Status.Phase == flaggerv1.CanaryProgressing {
return true
}
if cd.Status.State == "" {
if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryInitialized}); err != nil {
c.logger.Errorf("%v", err)
if cd.Status.Phase == "" {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryInitialized}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd)
@@ -255,16 +312,16 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
return false
}
- if diff, err := deployer.IsNewSpec(cd); diff {
+ if shouldAdvance {
c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "New revision detected, starting canary analysis.",
true, false)
- if err = deployer.Scale(cd, 1); err != nil {
+ if err := c.deployer.Scale(cd, 1); err != nil {
c.recordEventErrorf(cd, "%v", err)
return false
}
- if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryRunning}); err != nil {
- c.logger.Errorf("%v", err)
+ if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryProgressing}); err != nil {
+ c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd)
@@ -273,13 +330,40 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
return false
}
+ func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
+ if cd.Status.Phase == flaggerv1.CanaryProgressing {
+ if diff, _ := c.deployer.IsNewSpec(cd); diff {
+ return true
+ }
+ if diff, _ := c.deployer.configTracker.HasConfigChanged(cd); diff {
+ return true
+ }
+ }
+ return false
+ }
func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
+ // run external checks
+ for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
+ err := CallWebhook(r.Name, r.Namespace, webhook)
+ if err != nil {
+ c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
+ r.Name, r.Namespace, webhook.Name, err)
+ return false
+ }
+ }
// run metrics checks
for _, metric := range r.Spec.CanaryAnalysis.Metrics {
if metric.Name == "istio_requests_total" {
val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
}
return false
}
if float64(metric.Threshold) > val {
@@ -304,15 +388,5 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
}
}
- // run external checks
- for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
- err := CallWebhook(r.Name, r.Namespace, webhook)
- if err != nil {
- c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
- r.Name, r.Namespace, webhook.Name, err)
- return false
- }
- }
return true
}
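For reference, the duplicate-target check added to scheduleCanaries above is a plain O(n²) scan over the canary-name→target map built on each tick. A standalone sketch of the same logic, with made-up names that are not from this diff:

package main

import "fmt"

func main() {
	// canary name -> target, in the same <targetRef.name>.<namespace>
	// format the controller builds (sample data for illustration)
	current := map[string]string{
		"podinfo-a.default": "podinfo.default",
		"podinfo-b.default": "podinfo.default", // duplicate target
		"frontend.default":  "frontend.default",
	}

	for canaryName, targetName := range current {
		for name, target := range current {
			if name != canaryName && target == targetName {
				fmt.Printf("canary %s shares target %s with %s\n", canaryName, targetName, name)
			}
		}
	}
}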

View File

@@ -1,140 +1,36 @@
package controller
import (
"sync"
"testing"
"time"
fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
"github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/logging"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
"testing"
)
func TestScheduler_Init(t *testing.T) {
- canary := newTestCanary()
- dep := newTestDeployment()
- hpa := newTestHPA()
+ mocks := SetupMocks()
+ mocks.ctrl.advanceCanary("podinfo", "default", false)
- flaggerClient := fakeFlagger.NewSimpleClientset(canary)
- kubeClient := fake.NewSimpleClientset(dep, hpa)
- istioClient := fakeIstio.NewSimpleClientset()
- logger, _ := logging.NewLogger("debug")
- deployer := CanaryDeployer{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- logger: logger,
- }
- router := CanaryRouter{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- istioClient: istioClient,
- logger: logger,
- }
- observer := CanaryObserver{
- metricsServer: "fake",
- }
- flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
- flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
- ctrl := &Controller{
- kubeClient: kubeClient,
- istioClient: istioClient,
- flaggerClient: flaggerClient,
- flaggerLister: flaggerInformer.Lister(),
- flaggerSynced: flaggerInformer.Informer().HasSynced,
- workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
- eventRecorder: &record.FakeRecorder{},
- logger: logger,
- canaries: new(sync.Map),
- flaggerWindow: time.Second,
- deployer: deployer,
- router: router,
- observer: observer,
- recorder: NewCanaryRecorder(false),
- }
- ctrl.flaggerSynced = alwaysReady
- ctrl.advanceCanary("podinfo", "default")
- _, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+ _, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
}
func TestScheduler_NewRevision(t *testing.T) {
- canary := newTestCanary()
- dep := newTestDeployment()
- hpa := newTestHPA()
- flaggerClient := fakeFlagger.NewSimpleClientset(canary)
- kubeClient := fake.NewSimpleClientset(dep, hpa)
- istioClient := fakeIstio.NewSimpleClientset()
- logger, _ := logging.NewLogger("debug")
- deployer := CanaryDeployer{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- logger: logger,
- }
- router := CanaryRouter{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- istioClient: istioClient,
- logger: logger,
- }
- observer := CanaryObserver{
- metricsServer: "fake",
- }
- flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
- flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
- ctrl := &Controller{
- kubeClient: kubeClient,
- istioClient: istioClient,
- flaggerClient: flaggerClient,
- flaggerLister: flaggerInformer.Lister(),
- flaggerSynced: flaggerInformer.Informer().HasSynced,
- workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
- eventRecorder: &record.FakeRecorder{},
- logger: logger,
- canaries: new(sync.Map),
- flaggerWindow: time.Second,
- deployer: deployer,
- router: router,
- observer: observer,
- recorder: NewCanaryRecorder(false),
- }
- ctrl.flaggerSynced = alwaysReady
// init
ctrl.advanceCanary("podinfo", "default")
mocks := SetupMocks()
mocks.ctrl.advanceCanary("podinfo", "default", false)
// update
- dep2 := newTestDeploymentUpdated()
- _, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+ dep2 := newTestDeploymentV2()
+ _, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default")
mocks.ctrl.advanceCanary("podinfo", "default", false)
- c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
+ c, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
@@ -145,69 +41,179 @@ func TestScheduler_NewRevision(t *testing.T) {
}
func TestScheduler_Rollback(t *testing.T) {
- canary := newTestCanary()
- dep := newTestDeployment()
- hpa := newTestHPA()
- flaggerClient := fakeFlagger.NewSimpleClientset(canary)
- kubeClient := fake.NewSimpleClientset(dep, hpa)
- istioClient := fakeIstio.NewSimpleClientset()
- logger, _ := logging.NewLogger("debug")
- deployer := CanaryDeployer{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- logger: logger,
- }
- router := CanaryRouter{
- flaggerClient: flaggerClient,
- kubeClient: kubeClient,
- istioClient: istioClient,
- logger: logger,
- }
- observer := CanaryObserver{
- metricsServer: "fake",
- }
- flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
- flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
- ctrl := &Controller{
- kubeClient: kubeClient,
- istioClient: istioClient,
- flaggerClient: flaggerClient,
- flaggerLister: flaggerInformer.Lister(),
- flaggerSynced: flaggerInformer.Informer().HasSynced,
- workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
- eventRecorder: &record.FakeRecorder{},
- logger: logger,
- canaries: new(sync.Map),
- flaggerWindow: time.Second,
- deployer: deployer,
- router: router,
- observer: observer,
- recorder: NewCanaryRecorder(false),
- }
- ctrl.flaggerSynced = alwaysReady
+ mocks := SetupMocks()
// init
ctrl.advanceCanary("podinfo", "default")
mocks.ctrl.advanceCanary("podinfo", "default", true)
// update failed checks to max
- err := deployer.SyncStatus(canary, v1alpha3.CanaryStatus{State: v1alpha3.CanaryRunning, FailedChecks: 11})
+ err := mocks.deployer.SyncStatus(mocks.canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
if err != nil {
t.Fatal(err.Error())
}
// detect changes
ctrl.advanceCanary("podinfo", "default")
mocks.ctrl.advanceCanary("podinfo", "default", true)
- c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+ c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
- if c.Status.State != v1alpha3.CanaryFailed {
- t.Errorf("Got canary state %v wanted %v", c.Status.State, v1alpha3.CanaryFailed)
+ if c.Status.Phase != v1alpha3.CanaryFailed {
+ t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryFailed)
}
}
func TestScheduler_NewRevisionReset(t *testing.T) {
mocks := SetupMocks()
// init
mocks.ctrl.advanceCanary("podinfo", "default", false)
// first update
dep2 := newTestDeploymentV2()
_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 90 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 90)
}
if canaryRoute.Weight != 10 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 10)
}
// second update
dep2.Spec.Template.Spec.ServiceAccountName = "test"
_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
}
if canaryRoute.Weight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
}
}
func TestScheduler_Promotion(t *testing.T) {
mocks := SetupMocks()
// init
mocks.ctrl.advanceCanary("podinfo", "default", false)
// update
dep2 := newTestDeploymentV2()
_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}
// detect pod spec changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
config2 := NewTestConfigMapV2()
_, err = mocks.kubeClient.CoreV1().ConfigMaps("default").Update(config2)
if err != nil {
t.Fatal(err.Error())
}
secret2 := NewTestSecretV2()
_, err = mocks.kubeClient.CoreV1().Secrets("default").Update(secret2)
if err != nil {
t.Fatal(err.Error())
}
// detect configs changes
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err := mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
primaryRoute.Weight = 60
canaryRoute.Weight = 40
err = mocks.ctrl.router.SetRoutes(mocks.canary, primaryRoute, canaryRoute)
if err != nil {
t.Fatal(err.Error())
}
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
// promote
mocks.ctrl.advanceCanary("podinfo", "default", true)
primaryRoute, canaryRoute, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())
}
if primaryRoute.Weight != 100 {
t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
}
if canaryRoute.Weight != 0 {
t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
}
primaryDep, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
primaryImage := primaryDep.Spec.Template.Spec.Containers[0].Image
canaryImage := dep2.Spec.Template.Spec.Containers[0].Image
if primaryImage != canaryImage {
t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage)
}
configPrimary, err := mocks.kubeClient.CoreV1().ConfigMaps("default").Get("podinfo-config-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if configPrimary.Data["color"] != config2.Data["color"] {
t.Errorf("Got primary ConfigMap color %s wanted %s", configPrimary.Data["color"], config2.Data["color"])
}
secretPrimary, err := mocks.kubeClient.CoreV1().Secrets("default").Get("podinfo-secret-env-primary", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if string(secretPrimary.Data["apiKey"]) != string(secret2.Data["apiKey"]) {
t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret2.Data["apiKey"])
}
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanarySucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
}
}

pkg/controller/tracker.go Normal file
View File

@@ -0,0 +1,374 @@
package controller
import (
"crypto/sha256"
"encoding/json"
"fmt"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
)
// ConfigTracker manages the operations for Kubernetes ConfigMaps and Secrets
type ConfigTracker struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
}
type ConfigRefType string
const (
ConfigRefMap ConfigRefType = "configmap"
ConfigRefSecret ConfigRefType = "secret"
)
// ConfigRef holds the reference to a tracked Kubernetes ConfigMap or Secret
type ConfigRef struct {
Name string
Type ConfigRefType
Checksum string
}
// GetName returns the config ref type and name
func (c *ConfigRef) GetName() string {
return fmt.Sprintf("%s/%s", c.Type, c.Name)
}
func checksum(data interface{}) string {
jsonBytes, _ := json.Marshal(data)
hashBytes := sha256.Sum256(jsonBytes)
return fmt.Sprintf("%x", hashBytes[:8])
}
// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef
// and computes the checksum of the ConfigMap data
func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) {
config, err := ct.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return &ConfigRef{
Name: config.Name,
Type: ConfigRefMap,
Checksum: checksum(config.Data),
}, nil
}
// getRefFromSecret transforms a Kubernetes Secret into a ConfigRef
// and computes the checksum of the Secret data
func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) {
secret, err := ct.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// ignore registry secrets (those should be set via service account)
if secret.Type != corev1.SecretTypeOpaque &&
secret.Type != corev1.SecretTypeBasicAuth &&
secret.Type != corev1.SecretTypeSSHAuth &&
secret.Type != corev1.SecretTypeTLS {
ct.logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type)
return nil, nil
}
return &ConfigRef{
Name: secret.Name,
Type: ConfigRefSecret,
Checksum: checksum(secret.Data),
}, nil
}
// GetTargetConfigs scans the target deployment for Kubernetes ConfigMaps and Secrets
// and returns a list of config references
func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) {
res := make(map[string]ConfigRef)
targetName := cd.Spec.TargetRef.Name
targetDep, err := ct.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
}
return res, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
}
// scan volumes
for _, volume := range targetDep.Spec.Template.Spec.Volumes {
if cmv := volume.ConfigMap; cmv != nil {
config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace)
if err != nil {
ct.logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err)
continue
}
if config != nil {
res[config.GetName()] = *config
}
}
if sv := volume.Secret; sv != nil {
secret, err := ct.getRefFromSecret(sv.SecretName, cd.Namespace)
if err != nil {
ct.logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err)
continue
}
if secret != nil {
res[secret.GetName()] = *secret
}
}
}
// scan containers
for _, container := range targetDep.Spec.Template.Spec.Containers {
// scan env
for _, env := range container.Env {
if env.ValueFrom != nil {
switch {
case env.ValueFrom.ConfigMapKeyRef != nil:
name := env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name
config, err := ct.getRefFromConfigMap(name, cd.Namespace)
if err != nil {
ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
continue
}
if config != nil {
res[config.GetName()] = *config
}
case env.ValueFrom.SecretKeyRef != nil:
name := env.ValueFrom.SecretKeyRef.LocalObjectReference.Name
secret, err := ct.getRefFromSecret(name, cd.Namespace)
if err != nil {
ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
continue
}
if secret != nil {
res[secret.GetName()] = *secret
}
}
}
}
// scan envFrom
for _, envFrom := range container.EnvFrom {
switch {
case envFrom.ConfigMapRef != nil:
name := envFrom.ConfigMapRef.LocalObjectReference.Name
config, err := ct.getRefFromConfigMap(name, cd.Namespace)
if err != nil {
ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
continue
}
if config != nil {
res[config.GetName()] = *config
}
case envFrom.SecretRef != nil:
name := envFrom.SecretRef.LocalObjectReference.Name
secret, err := ct.getRefFromSecret(name, cd.Namespace)
if err != nil {
ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
continue
}
if secret != nil {
res[secret.GetName()] = *secret
}
}
}
}
return res, nil
}
// GetConfigRefs returns a map of configs and their checksum
func (ct *ConfigTracker) GetConfigRefs(cd *flaggerv1.Canary) (*map[string]string, error) {
res := make(map[string]string)
configs, err := ct.GetTargetConfigs(cd)
if err != nil {
return nil, err
}
for _, cfg := range configs {
res[cfg.GetName()] = cfg.Checksum
}
return &res, nil
}
// HasConfigChanged checks for changes in ConfigMaps and Secrets by comparing
// the checksum for each ConfigRef stored in Canary.Status.TrackedConfigs
func (ct *ConfigTracker) HasConfigChanged(cd *flaggerv1.Canary) (bool, error) {
configs, err := ct.GetTargetConfigs(cd)
if err != nil {
return false, err
}
if len(configs) == 0 && cd.Status.TrackedConfigs == nil {
return false, nil
}
if len(configs) > 0 && cd.Status.TrackedConfigs == nil {
return true, nil
}
trackedConfigs := *cd.Status.TrackedConfigs
if len(configs) != len(trackedConfigs) {
return true, nil
}
for _, cfg := range configs {
if trackedConfigs[cfg.GetName()] != cfg.Checksum {
ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
Infof("%s %s has changed", cfg.Type, cfg.Name)
return true, nil
}
}
return false, nil
}
// CreatePrimaryConfigs syncs the primary Kubernetes ConfigMaps and Secrets
// with those found in the target deployment
func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[string]ConfigRef) error {
for _, ref := range refs {
switch ref.Type {
case ConfigRefMap:
config, err := ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
if err != nil {
return err
}
primaryName := fmt.Sprintf("%s-primary", config.GetName())
primaryConfigMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: config.Labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
},
},
Data: config.Data,
}
// update or insert primary ConfigMap
_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap)
if err != nil {
if errors.IsNotFound(err) {
_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap)
if err != nil {
return err
}
} else {
return err
}
}
ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
Infof("ConfigMap %s synced", primaryConfigMap.GetName())
case ConfigRefSecret:
secret, err := ct.kubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
if err != nil {
return err
}
primaryName := fmt.Sprintf("%s-primary", secret.GetName())
primarySecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: secret.Labels,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
Version: flaggerv1.SchemeGroupVersion.Version,
Kind: flaggerv1.CanaryKind,
}),
},
},
Type: secret.Type,
Data: secret.Data,
}
// update or insert primary Secret
_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret)
if err != nil {
if errors.IsNotFound(err) {
_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret)
if err != nil {
return err
}
} else {
return err
}
}
ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
Infof("Secret %s synced", primarySecret.GetName())
}
}
return nil
}
// ApplyPrimaryConfigs appends the "-primary" suffix to all ConfigMaps and Secrets found in the PodSpec
func (ct *ConfigTracker) ApplyPrimaryConfigs(spec corev1.PodSpec, refs map[string]ConfigRef) corev1.PodSpec {
// update volumes
for i, volume := range spec.Volumes {
if cmv := volume.ConfigMap; cmv != nil {
name := fmt.Sprintf("%s/%s", ConfigRefMap, cmv.Name)
if _, exists := refs[name]; exists {
spec.Volumes[i].ConfigMap.Name += "-primary"
}
}
if sv := volume.Secret; sv != nil {
name := fmt.Sprintf("%s/%s", ConfigRefSecret, sv.SecretName)
if _, exists := refs[name]; exists {
spec.Volumes[i].Secret.SecretName += "-primary"
}
}
}
// update containers
for _, container := range spec.Containers {
// update env
for i, env := range container.Env {
if env.ValueFrom != nil {
switch {
case env.ValueFrom.ConfigMapKeyRef != nil:
name := fmt.Sprintf("%s/%s", ConfigRefMap, env.ValueFrom.ConfigMapKeyRef.Name)
if _, exists := refs[name]; exists {
container.Env[i].ValueFrom.ConfigMapKeyRef.Name += "-primary"
}
case env.ValueFrom.SecretKeyRef != nil:
name := fmt.Sprintf("%s/%s", ConfigRefSecret, env.ValueFrom.SecretKeyRef.Name)
if _, exists := refs[name]; exists {
container.Env[i].ValueFrom.SecretKeyRef.Name += "-primary"
}
}
}
}
// update envFrom
for i, envFrom := range container.EnvFrom {
switch {
case envFrom.ConfigMapRef != nil:
name := fmt.Sprintf("%s/%s", ConfigRefMap, envFrom.ConfigMapRef.Name)
if _, exists := refs[name]; exists {
container.EnvFrom[i].ConfigMapRef.Name += "-primary"
}
case envFrom.SecretRef != nil:
name := fmt.Sprintf("%s/%s", ConfigRefSecret, envFrom.SecretRef.Name)
if _, exists := refs[name]; exists {
container.EnvFrom[i].SecretRef.Name += "-primary"
}
}
}
}
return spec
}
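Two details of the tracker are worth a closer look. First, the checksum scheme: the object data is JSON-encoded, hashed with SHA-256, and truncated to the first 8 bytes, so any edit to a tracked ConfigMap or Secret yields a new digest. A standalone sketch with sample data:

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// same scheme as the tracker's checksum helper above: JSON-encode
// the data, SHA-256 it, keep the first 8 bytes as a hex string
func checksum(data interface{}) string {
	jsonBytes, _ := json.Marshal(data)
	hashBytes := sha256.Sum256(jsonBytes)
	return fmt.Sprintf("%x", hashBytes[:8])
}

func main() {
	fmt.Println(checksum(map[string]string{"color": "blue"}))
	fmt.Println(checksum(map[string]string{"color": "green"})) // different digest triggers a restart
}

Second, the intended call order during bootstrap and promotion. The deployer wiring is not part of this diff, so the helper below is a hypothetical sketch chaining the three exported steps: discover the tracked configs, mirror them as -primary copies, then rewrite the pod spec to reference the copies:

package controller

import (
	corev1 "k8s.io/api/core/v1"

	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
)

// syncPrimaryConfigs is a hypothetical helper (not in this diff)
// showing the intended flow: discover, mirror, then rewire
func syncPrimaryConfigs(ct *ConfigTracker, cd *flaggerv1.Canary, spec corev1.PodSpec) (corev1.PodSpec, error) {
	// scan the target deployment's volumes, env and envFrom entries
	configs, err := ct.GetTargetConfigs(cd)
	if err != nil {
		return spec, err
	}
	// upsert the -primary ConfigMap and Secret copies
	if err := ct.CreatePrimaryConfigs(cd, configs); err != nil {
		return spec, err
	}
	// return a pod spec pointing at the -primary copies
	return ct.ApplyPrimaryConfigs(spec, configs), nil
}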

View File

@@ -19,7 +19,10 @@ func CallWebhook(name string, namespace string, w flaggerv1.CanaryWebhook) error
payload := flaggerv1.CanaryWebhookPayload{
Name: name,
Namespace: namespace,
Metadata: w.Metadata,
}
if w.Metadata != nil {
payload.Metadata = *w.Metadata
}
payloadBin, err := json.Marshal(payload)
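The nil guard matters because the webhook Metadata field is now a pointer: dereferencing it for a webhook that sets no metadata would panic. A standalone sketch of the guarded copy (the JSON in the final comment assumes lowercase field tags on the payload type):

package main

import (
	"encoding/json"
	"fmt"

	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
)

func main() {
	// a webhook that sets no metadata: Metadata stays nil
	var w flaggerv1.CanaryWebhook

	payload := flaggerv1.CanaryWebhookPayload{Name: "podinfo", Namespace: "test"}
	if w.Metadata != nil { // guard copied from CallWebhook above
		payload.Metadata = *w.Metadata
	}

	b, _ := json.Marshal(payload)
	fmt.Println(string(b)) // e.g. {"name":"podinfo","namespace":"test"}
}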

pkg/loadtester/runner.go Normal file
View File

@@ -0,0 +1,107 @@
package loadtester
import (
"context"
"encoding/hex"
"fmt"
"go.uber.org/zap"
"hash/fnv"
"os/exec"
"sync"
"sync/atomic"
"time"
)
type TaskRunner struct {
logger *zap.SugaredLogger
timeout time.Duration
todoTasks *sync.Map
runningTasks *sync.Map
totalExecs uint64
logCmdOutput bool
}
type Task struct {
Canary string
Command string
}
func (t Task) Hash() string {
// hash the canary name and command so identical tasks map to the same key;
// note: hash.Hash.Sum appends the digest to its argument, so the data
// must be fed through Write first
fnvHash := fnv.New32()
fnvHash.Write([]byte(t.Canary + t.Command))
return hex.EncodeToString(fnvHash.Sum(nil))
}
func NewTaskRunner(logger *zap.SugaredLogger, timeout time.Duration, logCmdOutput bool) *TaskRunner {
return &TaskRunner{
logger: logger,
todoTasks: new(sync.Map),
runningTasks: new(sync.Map),
timeout: timeout,
logCmdOutput: logCmdOutput,
}
}
func (tr *TaskRunner) Add(task Task) {
tr.todoTasks.Store(task.Hash(), task)
}
func (tr *TaskRunner) GetTotalExecs() uint64 {
return atomic.LoadUint64(&tr.totalExecs)
}
func (tr *TaskRunner) runAll() {
tr.todoTasks.Range(func(key interface{}, value interface{}) bool {
task := value.(Task)
go func(t Task) {
// remove the task from the to-do list
tr.todoTasks.Delete(t.Hash())
// check if task is already running, if not run the task's command
if _, exists := tr.runningTasks.Load(t.Hash()); !exists {
// save the task in the running list
tr.runningTasks.Store(t.Hash(), t)
// create timeout context
ctx, cancel := context.WithTimeout(context.Background(), tr.timeout)
defer cancel()
// increment the total exec counter
atomic.AddUint64(&tr.totalExecs, 1)
tr.logger.With("canary", t.Canary).Infof("command starting %s", t.Command)
cmd := exec.CommandContext(ctx, "sh", "-c", t.Command)
// execute task
out, err := cmd.CombinedOutput()
if err != nil {
tr.logger.With("canary", t.Canary).Errorf("command failed %s %v %s", t.Command, err, out)
} else {
if tr.logCmdOutput {
fmt.Printf("%s\n", out)
}
tr.logger.With("canary", t.Canary).Infof("command finished %s", t.Command)
}
// remove task from the running list
tr.runningTasks.Delete(t.Hash())
} else {
tr.logger.With("canary", t.Canary).Infof("command skipped %s is already running", t.Command)
}
}(task)
return true
})
}
func (tr *TaskRunner) Start(interval time.Duration, stopCh <-chan struct{}) {
tickChan := time.NewTicker(interval).C
for {
select {
case <-tickChan:
tr.runAll()
case <-stopCh:
tr.logger.Info("shutting down the task runner")
return
}
}
}
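For context, a minimal sketch of how the runner could be wired up; the interval, timeout, and import paths are inferred from this diff and the repo layout, not taken from the loadtester's actual main:

package main

import (
	"time"

	"github.com/stefanprodan/flagger/pkg/loadtester"
	"github.com/stefanprodan/flagger/pkg/logging"
)

func main() {
	logger, _ := logging.NewLogger("debug")
	stopCh := make(chan struct{})

	// kill commands after one hour, echo their output to stdout
	tr := loadtester.NewTaskRunner(logger, time.Hour, true)
	go tr.Start(100*time.Millisecond, stopCh)

	// queue a load test; an identical task added while this one
	// runs is skipped thanks to the Hash-based dedup above
	tr.Add(loadtester.Task{
		Canary:  "podinfo.test",
		Command: "hey -z 10s -q 5 -c 2 http://podinfo.test:9898/",
	})

	time.Sleep(15 * time.Second) // let the command finish
	close(stopCh)
}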

View File

@@ -0,0 +1,52 @@
package loadtester
import (
"github.com/stefanprodan/flagger/pkg/logging"
"testing"
"time"
)
func TestTaskRunner_Start(t *testing.T) {
stop := make(chan struct{})
logger, _ := logging.NewLogger("debug")
tr := NewTaskRunner(logger, time.Hour, false)
go tr.Start(10*time.Millisecond, stop)
task1 := Task{
Canary: "podinfo.default",
Command: "sleep 0.6",
}
task2 := Task{
Canary: "podinfo.default",
Command: "sleep 0.7",
}
tr.Add(task1)
tr.Add(task2)
time.Sleep(100 * time.Millisecond)
tr.Add(task1)
tr.Add(task2)
time.Sleep(100 * time.Millisecond)
tr.Add(task1)
tr.Add(task2)
if tr.GetTotalExecs() != 2 {
t.Errorf("Got total executed commands %v wanted %v", tr.GetTotalExecs(), 2)
}
time.Sleep(time.Second)
tr.Add(task1)
tr.Add(task2)
time.Sleep(time.Second)
if tr.GetTotalExecs() != 4 {
t.Errorf("Got total executed commands %v wanted %v", tr.GetTotalExecs(), 4)
}
}

pkg/loadtester/server.go Normal file
View File

@@ -0,0 +1,85 @@
package loadtester
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
"go.uber.org/zap"
)
// ListenAndServe starts a web server and waits for SIGTERM
func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogger, taskRunner *TaskRunner, stopCh <-chan struct{}) {
mux := http.DefaultServeMux
mux.Handle("/metrics", promhttp.Handler())
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
})
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error("reading the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
defer r.Body.Close()
payload := &flaggerv1.CanaryWebhookPayload{}
err = json.Unmarshal(body, payload)
if err != nil {
logger.Error("decoding the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
if len(payload.Metadata) > 0 {
if cmd, ok := payload.Metadata["cmd"]; ok {
taskRunner.Add(Task{
Canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
Command: cmd,
})
} else {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("cmd not found in metadata"))
return
}
} else {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("metadata not found in payload"))
return
}
w.WriteHeader(http.StatusAccepted)
})
srv := &http.Server{
Addr: ":" + port,
Handler: mux,
ReadTimeout: 5 * time.Second,
WriteTimeout: 1 * time.Minute,
IdleTimeout: 15 * time.Second,
}
// run server in background
go func() {
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
logger.Fatalf("HTTP server crashed %v", err)
}
}()
// wait for SIGTERM or SIGINT
<-stopCh
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
if err := srv.Shutdown(ctx); err != nil {
logger.Errorf("HTTP server graceful shutdown failed %v", err)
} else {
logger.Info("HTTP server stopped")
}
}
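A client round-trip against this handler might look like the sketch below; the URL is a made-up in-cluster address, and the JSON keys assume the payload's tags are the lowercase field names:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// metadata.cmd is required; without it the handler returns 400
	body := []byte(`{"name":"podinfo","namespace":"test",` +
		`"metadata":{"cmd":"hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"}}`)

	resp, err := http.Post("http://flagger-loadtester.test/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.Status) // "202 Accepted" once the task is queued
}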

View File

@@ -1,4 +1,4 @@
package version
var VERSION = "0.3.0"
var VERSION = "0.5.0"
var REVISION = "unknown"