Mirror of https://github.com/fluxcd/flagger.git (synced 2026-02-15 18:40:12 +00:00)

Compare commits: 59 commits
Commit SHA1s (the author and date columns were not preserved in this mirror):

b8a7ea8534, afe4d59d5a, 0f2697df23, 05664fa648, 3b2564f34b, dd0cf2d588,
7c66f23c6a, a9f034de1a, 6ad2dca57a, e8353c110b, dbf26ddf53, acc72d207f,
a784f83464, 07d8355363, f7a439274e, bd6d446cb8, 385d0e0549, 02236374d8,
c46fe55ad0, 36a54fbf2a, 60f6b05397, 6d8a7343b7, aff8b117d4, 1b3c3b22b3,
1d31b5ed90, 1ef310f00d, acdd2c46d5, 9872e6bc16, 10c2bdec86, 4bf3b70048,
ada446bbaa, c4981ef4db, d1b84cd31d, 9232c8647a, 23e8c7d616, 42607fbd64,
28781a5f02, 3589e11244, 5e880d3942, f7e675144d, 3bff2c339b, b035c1e7fb,
7ae0d49e80, 07f66e849d, 06c29051eb, 83118faeb3, aa2c28c733, 10185407f6,
c1bde57c17, 882b4b2d23, cac585157f, cc2860a49f, bec96356ec, b5c648ea54,
e6e3e500be, 537e8fdaf7, 322c83bdad, 41f0ba0247, b67b49fde6
.github/main.workflow (vendored, new file, +17 lines)
@@ -0,0 +1,17 @@
workflow "Publish Helm charts" {
  on = "push"
  resolves = ["helm-push"]
}

action "helm-lint" {
  uses = "stefanprodan/gh-actions/helm@master"
  args = ["lint charts/*"]
}

action "helm-push" {
  needs = ["helm-lint"]
  uses = "stefanprodan/gh-actions/helm-gh-pages@master"
  args = ["charts/*","https://flagger.app"]
  secrets = ["GITHUB_TOKEN"]
}
.travis.yml (file name inferred from the TRAVIS_* variables)
@@ -23,9 +23,10 @@ after_success:
  - if [ -z "$DOCKER_USER" ]; then
      echo "PR build, skipping image push";
    else
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
      BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
      docker push quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
      docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
    fi
  - if [ -z "$TRAVIS_TAG" ]; then
      echo "Not a release, skipping image push";
Dockerfile.loadtester (new file, +44 lines)
@@ -0,0 +1,44 @@
FROM golang:1.11 AS hey-builder

RUN mkdir -p /go/src/github.com/rakyll/hey/

WORKDIR /go/src/github.com/rakyll/hey

ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .

RUN tar xzf v0.1.1.tar.gz --strip 1

RUN go get ./...

RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
    go install -ldflags '-w -extldflags "-static"' \
    /go/src/github.com/rakyll/hey

FROM golang:1.11 AS builder

RUN mkdir -p /go/src/github.com/stefanprodan/flagger/

WORKDIR /go/src/github.com/stefanprodan/flagger

COPY . .

RUN go test -race ./pkg/loadtester/

RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*

FROM alpine:3.8

RUN addgroup -S app \
    && adduser -S -g app app \
    && apk --no-cache add ca-certificates curl

WORKDIR /home/app

COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .

RUN chown -R app:app ./

USER app

ENTRYPOINT ["./loadtester"]
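This multi-stage build is driven by the Makefile's loadtester-push target. A sketch for building and smoke-testing the image locally (the 0.1.0 tag matches LT_VERSION derived from cmd/loadtester/main.go, and /healthz is the endpoint the Kubernetes probes use):

```bash
# build the image from the repo root using the dedicated Dockerfile
docker build -t quay.io/stefanprodan/flagger-loadtester:0.1.0 . -f Dockerfile.loadtester

# run it locally; args after the image name are passed to the ./loadtester entrypoint
docker run -d -p 8080:8080 quay.io/stefanprodan/flagger-loadtester:0.1.0 -port=8080

# verify the health endpoint used by the liveness/readiness probes
curl -s http://localhost:8080/healthz
```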
Makefile (8 lines changed)
@@ -3,6 +3,8 @@ VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | t
 VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
 PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
 SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
+LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)

 run:
 	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
 	-metrics-server=https://prometheus.iowa.weavedx.com \
@@ -29,7 +31,7 @@ test: test-fmt test-codegen
 	go test ./...

 helm-package:
-	cd charts/ && helm package flagger/ && helm package grafana/
+	cd charts/ && helm package flagger/ && helm package grafana/ && helm package loadtester/
 	mv charts/*.tgz docs/
 	helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml

@@ -77,3 +79,7 @@ reset-test:
 	kubectl delete -f ./artifacts/namespaces
 	kubectl apply -f ./artifacts/namespaces
 	kubectl apply -f ./artifacts/canaries
+
+loadtester-push:
+	docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
+	docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)
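The new target can be exercised directly (assuming push rights to quay.io; because LT_VERSION is assigned with ?=, the version parsed from cmd/loadtester/main.go can also be overridden on the command line):

```bash
# build and push the load tester image using the version parsed from main.go
make loadtester-push

# or pin an explicit tag
make loadtester-push LT_VERSION=0.1.0
```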
README.md (58 lines changed)
@@ -8,8 +8,8 @@
 Flagger is a Kubernetes operator that automates the promotion of canary deployments
 using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
-The canary analysis can be extended with webhooks for running integration tests, load tests or any other custom
-validation.
+The canary analysis can be extended with webhooks for running integration tests,
+load tests or any other custom validation.

 ### Install

@@ -25,11 +25,10 @@ helm repo add flagger https://flagger.app
 # install or upgrade
 helm upgrade -i flagger flagger/flagger \
 --namespace=istio-system \
---set metricsServer=http://prometheus.istio-system:9090 \
---set controlLoopInterval=1m
+--set metricsServer=http://prometheus.istio-system:9090
 ```

-Flagger is compatible with Kubernetes >1.10.0 and Istio >1.0.0.
+Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.

 ### Usage

@@ -75,7 +74,7 @@ You can change the canary analysis _max weight_ and the _step weight_ percentage
 For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

 ```yaml
-apiVersion: flagger.app/v1alpha2
+apiVersion: flagger.app/v1alpha3
 kind: Canary
 metadata:
   name: podinfo
@@ -102,8 +101,10 @@ spec:
     - public-gateway.istio-system.svc.cluster.local
     # Istio virtual service host names (optional)
     hosts:
-    - app.iowa.weavedx.com
+    - podinfo.example.com
   canaryAnalysis:
+    # schedule interval (default 60s)
+    interval: 1m
     # max number of failed metric checks before rollback
     threshold: 10
     # max traffic percentage routed to canary
@@ -126,12 +127,11 @@ spec:
       interval: 30s
     # external checks (optional)
     webhooks:
-      - name: integration-tests
-        url: http://podinfo.test:9898/echo
-        timeout: 1m
+      - name: load-test
+        url: http://flagger-loadtester.test/
+        timeout: 5s
         metadata:
-          test: "all"
-          token: "16688eb5e9f289f1991c"
+          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
 ```

 The canary analysis is using the following promql queries:
@@ -210,6 +210,13 @@ kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
 kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
 ```

+Deploy the load testing service to generate traffic during the canary analysis:
+
+```bash
+kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
+kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
+```
+
 Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):

 ```bash
@@ -238,18 +245,19 @@ Trigger a canary deployment by updating the container image:

 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=quay.io/stefanprodan/podinfo:1.2.1
+podinfod=quay.io/stefanprodan/podinfo:1.4.0
 ```

-Flagger detects that the deployment revision changed and starts a new rollout:
+Flagger detects that the deployment revision changed and starts a new canary analysis:

 ```
 kubectl -n test describe canary/podinfo

 Status:
-  Canary Revision:       19871136
-  Failed Checks:         0
-  State:                 finished
+  Canary Weight:         0
+  Failed Checks:         0
+  Last Transition Time:  2019-01-16T13:47:16Z
+  Phase:                 Succeeded
 Events:
   Type     Reason  Age   From     Message
   ----     ------  ----  ----     -------
@@ -271,6 +279,15 @@ Events:
   Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
 ```

+You can monitor all canaries with:
+
+```bash
+watch kubectl get canaries --all-namespaces
+
+NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
+test        podinfo   Progressing   5        2019-01-16T14:05:07Z
+```
+
 During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

 Create a tester pod and exec into it:
@@ -299,9 +316,10 @@ the canary is scaled to zero and the rollout is marked as failed.
 kubectl -n test describe canary/podinfo

 Status:
-  Canary Revision:       16695041
-  Failed Checks:         10
-  State:                 failed
+  Canary Weight:         0
+  Failed Checks:         10
+  Last Transition Time:  2019-01-16T13:47:16Z
+  Phase:                 Failed
 Events:
   Type     Reason  Age   From     Message
   ----     ------  ----  ----     -------
Canary example manifest (file name not preserved in this view)
@@ -1,4 +1,4 @@
-apiVersion: flagger.app/v1alpha2
+apiVersion: flagger.app/v1alpha3
 kind: Canary
 metadata:
   name: podinfo
@@ -27,6 +27,8 @@ spec:
     hosts:
     - app.iowa.weavedx.com
   canaryAnalysis:
+    # schedule interval (default 60s)
+    interval: 10s
     # max number of failed metric checks before rollback
     threshold: 10
     # max traffic percentage routed to canary
@@ -49,9 +51,8 @@ spec:
       interval: 30s
     # external checks (optional)
     webhooks:
-      - name: integration-tests
-        url: http://podinfo.test:9898/echo
-        timeout: 1m
+      - name: load-test
+        url: http://flagger-loadtester.test/
+        timeout: 5s
         metadata:
-          test: "all"
-          token: "16688eb5e9f289f1991c"
+          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
CRD manifest for canaries.flagger.app (file name not preserved in this view)
@@ -4,11 +4,14 @@ metadata:
   name: canaries.flagger.app
 spec:
   group: flagger.app
-  version: v1alpha2
+  version: v1alpha3
   versions:
-    - name: v1alpha2
+    - name: v1alpha3
       served: true
       storage: true
+    - name: v1alpha2
+      served: true
+      storage: false
     - name: v1alpha1
       served: true
       storage: false
@@ -16,7 +19,21 @@ spec:
     plural: canaries
     singular: canary
     kind: Canary
+    categories:
+      - all
   scope: Namespaced
+  subresources:
+    status: {}
+  additionalPrinterColumns:
+    - name: Status
+      type: string
+      JSONPath: .status.phase
+    - name: Weight
+      type: string
+      JSONPath: .status.canaryWeight
+    - name: LastTransitionTime
+      type: string
+      JSONPath: .status.lastTransitionTime
   validation:
     openAPIV3Schema:
       properties:
@@ -39,7 +56,9 @@ spec:
             name:
               type: string
           autoscalerRef:
-            type: object
+            anyOf:
+              - type: string
+              - type: object
             required: ['apiVersion', 'kind', 'name']
             properties:
               apiVersion:
@@ -56,6 +75,9 @@ spec:
             type: number
           canaryAnalysis:
             properties:
+              interval:
+                type: string
+                pattern: "^[0-9]+(m|s)"
               threshold:
                 type: number
               maxWeight:
@@ -73,7 +95,7 @@ spec:
                     type: string
                   interval:
                     type: string
-                    pattern: "^[0-9]+(m)"
+                    pattern: "^[0-9]+(m|s)"
                   threshold:
                     type: number
               webhooks:
@@ -90,4 +112,4 @@ spec:
                     format: url
                   timeout:
                     type: string
-                    pattern: "^[0-9]+(s)"
+                    pattern: "^[0-9]+(m|s)"
Flagger deployment manifest (file name not preserved in this view)
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: quay.io/stefanprodan/flagger:0.2.0
+          image: quay.io/stefanprodan/flagger:0.4.1
           imagePullPolicy: Always
           ports:
             - name: http
artifacts/loadtester/deployment.yaml (new file, +60 lines)
@@ -0,0 +1,60 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger-loadtester
  labels:
    app: flagger-loadtester
spec:
  selector:
    matchLabels:
      app: flagger-loadtester
  template:
    metadata:
      labels:
        app: flagger-loadtester
      annotations:
        prometheus.io/scrape: "true"
    spec:
      containers:
        - name: loadtester
          image: quay.io/stefanprodan/flagger-loadtester:0.1.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./loadtester
            - -port=8080
            - -log-level=info
            - -timeout=1h
            - -log-cmd-output=true
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            limits:
              memory: "512Mi"
              cpu: "1000m"
            requests:
              memory: "32Mi"
              cpu: "10m"
          securityContext:
            readOnlyRootFilesystem: true
            runAsUser: 10001
artifacts/loadtester/service.yaml (new file, +15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: flagger-loadtester
  labels:
    app: flagger-loadtester
spec:
  type: ClusterIP
  selector:
    app: flagger-loadtester
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
charts/flagger/Chart.yaml
@@ -1,11 +1,11 @@
 apiVersion: v1
 name: flagger
-version: 0.2.0
-appVersion: 0.2.0
-kubeVersion: ">=1.9.0-0"
+version: 0.4.1
+appVersion: 0.4.1
+kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
-home: https://flagger.app
+home: https://docs.flagger.app
 icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
 sources:
   - https://github.com/stefanprodan/flagger
charts/flagger/README.md
@@ -1,14 +1,14 @@
 # Flagger

-[Flagger](https://flagger.app) is a Kubernetes operator that automates the promotion of canary deployments
-using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
+[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
+canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
 Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
 like HTTP requests success rate, requests average duration and pods health.
 Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack.

 ## Prerequisites

-* Kubernetes >= 1.9
+* Kubernetes >= 1.11
 * Istio >= 1.0
 * Prometheus >= 2.6

@@ -48,7 +48,6 @@ Parameter | Description | Default
 `image.repository` | image repository | `quay.io/stefanprodan/flagger`
 `image.tag` | image tag | `<VERSION>`
 `image.pullPolicy` | image pull policy | `IfNotPresent`
-`controlLoopInterval` | wait interval between checks | `10s`
 `metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
 `slack.url` | Slack incoming webhook | None
 `slack.channel` | Slack channel | None
@@ -68,7 +67,8 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
 ```console
 $ helm upgrade -i flagger flagger/flagger \
   --namespace istio-system \
-  --set controlLoopInterval=1m
+  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
+  --set slack.channel=general
 ```

 Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
@@ -80,5 +80,5 @@ $ helm upgrade -i flagger flagger/flagger \
 ```

 > **Tip**: You can use the default [values.yaml](values.yaml)
charts/flagger/templates/crd.yaml
@@ -5,11 +5,14 @@ metadata:
   name: canaries.flagger.app
 spec:
   group: flagger.app
-  version: v1alpha2
+  version: v1alpha3
   versions:
-    - name: v1alpha2
+    - name: v1alpha3
       served: true
       storage: true
+    - name: v1alpha2
+      served: true
+      storage: false
     - name: v1alpha1
       served: true
       storage: false
@@ -17,7 +20,21 @@ spec:
     plural: canaries
     singular: canary
     kind: Canary
+    categories:
+      - all
   scope: Namespaced
+  subresources:
+    status: {}
+  additionalPrinterColumns:
+    - name: Status
+      type: string
+      JSONPath: .status.phase
+    - name: Weight
+      type: string
+      JSONPath: .status.canaryWeight
+    - name: LastTransitionTime
+      type: string
+      JSONPath: .status.lastTransitionTime
   validation:
     openAPIV3Schema:
       properties:
@@ -40,7 +57,9 @@ spec:
             name:
              type: string
           autoscalerRef:
-            type: object
+            anyOf:
+              - type: string
+              - type: object
             required: ['apiVersion', 'kind', 'name']
             properties:
               apiVersion:
@@ -57,6 +76,9 @@ spec:
             type: number
           canaryAnalysis:
             properties:
+              interval:
+                type: string
+                pattern: "^[0-9]+(m|s)"
               threshold:
                 type: number
               maxWeight:
@@ -74,7 +96,7 @@ spec:
                     type: string
                   interval:
                     type: string
-                    pattern: "^[0-9]+(m)"
+                    pattern: "^[0-9]+(m|s)"
                   threshold:
                     type: number
               webhooks:
@@ -91,6 +113,5 @@ spec:
                     format: url
                   timeout:
                     type: string
-                    pattern: "^[0-9]+(s)"
-
+                    pattern: "^[0-9]+(m|s)"
 {{- end }}
charts/flagger/templates/deployment.yaml
@@ -35,7 +35,6 @@ spec:
           command:
             - ./flagger
             - -log-level=info
-            - -control-loop-interval={{ .Values.controlLoopInterval }}
            - -metrics-server={{ .Values.metricsServer }}
          {{- if .Values.slack.url }}
            - -slack-url={{ .Values.slack.url }}
charts/flagger/values.yaml
@@ -2,10 +2,9 @@

 image:
   repository: quay.io/stefanprodan/flagger
-  tag: 0.2.0
+  tag: 0.4.1
   pullPolicy: IfNotPresent

-controlLoopInterval: "10s"
 metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"

 slack:
charts/grafana/README.md
@@ -6,7 +6,7 @@ Grafana dashboards for monitoring progressive deployments powered by Istio, Prom

 ## Prerequisites

-* Kubernetes >= 1.9
+* Kubernetes >= 1.11
 * Istio >= 1.0
 * Prometheus >= 2.6

@@ -75,5 +75,5 @@ helm install flagger/grafana --name flagger-grafana -f values.yaml
 ```

 > **Tip**: You can use the default [values.yaml](values.yaml)
charts/loadtester/.helmignore (new file, +22 lines)
@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
charts/loadtester/Chart.yaml (new file, +20 lines)
@@ -0,0 +1,20 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
  - https://github.com/stefanprodan/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - canary
  - istio
  - gitops
  - load testing
charts/loadtester/README.md (new file, +78 lines)
@@ -0,0 +1,78 @@
# Flagger load testing service

[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-loadtester`:

```console
helm upgrade -i flagger-loadtester flagger/loadtester
```

The command deploys the load tester on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should have the Istio sidecar injection enabled

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-loadtester` deployment:

```console
helm delete --purge flagger-loadtester
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the load tester chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | Memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `node`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/loadtester --name flagger-loadtester \
  --set cmd.logOutput=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
charts/loadtester/templates/NOTES.txt (new file, +1 line)
@@ -0,0 +1 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/
charts/loadtester/templates/_helpers.tpl (new file, +32 lines)
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
charts/loadtester/templates/deployment.yaml (new file, +66 lines)
@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "loadtester.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "loadtester.name" . }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./loadtester
            - -port=8080
            - -log-level={{ .Values.logLevel }}
            - -timeout={{ .Values.cmd.timeout }}
            - -log-cmd-output={{ .Values.cmd.logOutput }}
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
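To sanity-check the rendered manifests without installing anything, the chart can be rendered locally (a sketch, assuming a Helm v2 client and a checkout of the repo):

```bash
# render the chart templates with the default values.yaml
helm template charts/loadtester --name flagger-loadtester

# or validate the rendered output against the cluster without applying it
helm template charts/loadtester --name flagger-loadtester | kubectl apply --dry-run -f -
```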
charts/loadtester/templates/service.yaml (new file, +18 lines)
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ include "loadtester.name" . }}
charts/loadtester/values.yaml (new file, +29 lines)
@@ -0,0 +1,29 @@
replicaCount: 1

image:
  repository: quay.io/stefanprodan/flagger-loadtester
  tag: 0.1.0
  pullPolicy: IfNotPresent

logLevel: info
cmd:
  logOutput: true
  timeout: 1h

nameOverride: ""
fullnameOverride: ""

service:
  type: ClusterIP
  port: 80

resources:
  requests:
    cpu: 10m
    memory: 64Mi

nodeSelector: {}

tolerations: []

affinity: {}
cmd/flagger/main.go
@@ -37,7 +37,7 @@ func init() {
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
 	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
 	flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
-	flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "wait interval between rollouts")
+	flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
 	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
 	flag.StringVar(&port, "port", "8080", "Port to listen on.")
 	flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
@@ -77,7 +77,7 @@ func main() {
 	}

 	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
-	canaryInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
+	canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

 	logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
cmd/loadtester/main.go (new file, +44 lines)
@@ -0,0 +1,44 @@
package main

import (
	"flag"
	"github.com/knative/pkg/signals"
	"github.com/stefanprodan/flagger/pkg/loadtester"
	"github.com/stefanprodan/flagger/pkg/logging"
	"log"
	"time"
)

var VERSION = "0.1.0"
var (
	logLevel     string
	port         string
	timeout      time.Duration
	logCmdOutput bool
)

func init() {
	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
	flag.StringVar(&port, "port", "9090", "Port to listen on.")
	flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
	flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
}

func main() {
	flag.Parse()

	logger, err := logging.NewLogger(logLevel)
	if err != nil {
		log.Fatalf("Error creating logger: %v", err)
	}
	defer logger.Sync()

	stopCh := signals.SetupSignalHandler()

	taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)

	go taskRunner.Start(100*time.Millisecond, stopCh)

	logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
	loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
}
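The binary can be exercised outside the cluster before building the image (a sketch; the flags are the ones registered in init above, and /healthz is the endpoint probed by the Kubernetes manifests):

```bash
# run the load tester locally from the repo root
go run cmd/loadtester/* -port=8080 -log-level=debug

# in another shell, check the health endpoint
curl -s http://localhost:8080/healthz
```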
CNAME (deleted)
@@ -1 +0,0 @@
-flagger.app
Docs homepage README (deleted; file name not preserved in this view)
@@ -1,11 +0,0 @@
-# Flagger
-
-Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic
-shifting and Prometheus metrics for canary analysis.
-
-Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
-indicators like HTTP requests success rate, requests average duration and pods health. Based on the KPIs analysis
-a canary is promoted or aborted and the analysis result is published to Slack.
-
-### For the install instructions and usage examples please see [docs.flagger.app](https://docs.flagger.app)
_config.yml (Jekyll site config, deleted)
@@ -1,55 +0,0 @@
-title: Flagger - Istio Progressive Delivery Kubernetes Operator
-
-remote_theme: errordeveloper/simple-project-homepage
-repository: stefanprodan/flagger
-by_weaveworks: true
-
-url: "https://flagger.app"
-baseurl: "/"
-
-twitter:
-  username: "stefanprodan"
-author:
-  twitter: "stefanprodan"
-
-# Set default og:image
-defaults:
-  - scope: {path: ""}
-    values: {image: "diagrams/flagger-overview.png"}
-
-# See: https://material.io/guidelines/style/color.html
-# Use color-name-value, like pink-200 or deep-purple-100
-brand_color: "amber-400"
-
-# How article URLs are structured.
-# See: https://jekyllrb.com/docs/permalinks/
-permalink: posts/:title/
-
-# "UA-NNNNNNNN-N"
-google_analytics: ""
-
-# Language. For example, if you write in Japanese, use "ja"
-lang: "en"
-
-# How many posts are visible on the home page without clicking "View More"
-num_posts_visible_initially: 5
-
-# Date format: See http://strftime.net/
-date_format: "%b %-d, %Y"
-
-plugins:
-  - jekyll-feed
-  - jekyll-readme-index
-  - jekyll-seo-tag
-  - jekyll-sitemap
-  - jemoji
-# # required for local builds with starefossen/github-pages
-#  - jekyll-github-metadata
-#  - jekyll-mentions
-#  - jekyll-redirect-from
-#  - jekyll-remote-theme
-
-exclude:
-  - CNAME
-  - gitbook
BIN docs/diagrams/flagger-load-testing.png (new file; binary not shown; 159 KiB)
(additional binary image files changed; not shown)
docs/gitbook introduction page
@@ -4,13 +4,20 @@ description: Flagger is an Istio progressive delivery Kubernetes operator

 # Introduction

-[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
+[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
+deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
+The canary analysis can be extended with webhooks for running integration tests,
+load tests or any other custom validation.

-Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pods health. Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
+Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
+indicators like HTTP requests success rate, requests average duration and pods health.
+Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.

 (overview image not preserved)

-Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events, it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
+Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
+any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
+it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.

 This project is sponsored by [Weaveworks](https://www.weave.works/)
docs/gitbook/SUMMARY.md
@@ -5,8 +5,8 @@

 ## Install

-* [Install Flagger](install/installing-flagger.md)
-* [Install Grafana](install/installing-grafana.md)
+* [Install Flagger](install/install-flagger.md)
+* [Install Grafana](install/install-grafana.md)
 * [Install Istio](install/install-istio.md)

 ## Usage
docs/gitbook/how-it-works.md
@@ -1,15 +1,17 @@
 # How it works

-[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\) and creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
+[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
+a horizontal pod autoscaler \(HPA\) and creates a series of objects
+\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.

 (diagram image changed; not preserved)

 ### Canary Custom Resource

 For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

 ```yaml
-apiVersion: flagger.app/v1alpha2
+apiVersion: flagger.app/v1alpha3
 kind: Canary
 metadata:
   name: podinfo
@@ -36,8 +38,10 @@ spec:
     - public-gateway.istio-system.svc.cluster.local
     # Istio virtual service host names (optional)
     hosts:
-    - app.iowa.weavedx.com
+    - podinfo.example.com
   canaryAnalysis:
+    # schedule interval (default 60s)
+    interval: 1m
     # max number of failed metric checks before rollback
     threshold: 10
     # max traffic percentage routed to canary
@@ -63,14 +67,35 @@ spec:
       - name: integration-tests
         url: http://podinfo.test:9898/echo
         timeout: 1m
+        # key-value pairs (optional)
         metadata:
           test: "all"
           token: "16688eb5e9f289f1991c"
 ```

+**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: podinfo
+spec:
+  selector:
+    matchLabels:
+      app: podinfo
+  template:
+    metadata:
+      labels:
+        app: podinfo
+```
+
+The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
+the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
+
 ### Canary Deployment

 (stages image changed; not preserved)

 Gated canary promotion stages:

@@ -89,27 +114,32 @@ Gated canary promotion stages:
   * scale to zero the canary deployment and mark it as failed
   * wait for the canary deployment to be updated \(revision bump\) and start over
 * increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
-* halt advancement while canary request success rate is under the threshold
-* halt advancement while canary request duration P99 is over the threshold
 * halt advancement if the primary or canary deployment becomes unhealthy
 * halt advancement while canary deployment is being scaled up/down by HPA
+* halt advancement if any of the webhook calls are failing
+* halt advancement while canary request success rate is under the threshold
+* halt advancement while canary request duration P99 is over the threshold
 * promote canary to primary
   * copy canary deployment spec template over primary
 * wait for primary rolling update to finish
   * halt advancement if pods are unhealthy
 * route all traffic to primary
 * scale to zero the canary deployment
-* mark rollout as finished
+* mark the canary deployment as finished
+* wait for the canary deployment to be updated \(revision bump\) and start over

 You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.

 ### Canary Analysis

+The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
+
 Spec:

 ```yaml
 canaryAnalysis:
   # schedule interval (default 60s)
   interval: 1m
   # max number of failed metric checks before rollback
   threshold: 10
   # max traffic percentage routed to canary
@@ -117,19 +147,20 @@ Spec:
   maxWeight: 50
   # canary increment step
   # percentage (0-100)
-  stepWeight: 5
+  stepWeight: 2
 ```

 The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
 You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:

 ```
-controlLoopInterval * (maxWeight / stepWeight)
+interval * (maxWeight / stepWeight)
 ```

-And the time it takes for a canary to be rollback:
+And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:

 ```
-controlLoopInterval * threshold
+interval * threshold
 ```
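Plugging in the spec above (interval: 1m, maxWeight: 50, stepWeight: 2, threshold: 10) makes the arithmetic concrete:

```
promotion:  1m * (50 / 2)  = 25 minutes
rollback:   1m * 10        = 10 minutes
```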

 ### HTTP Metrics
@@ -205,10 +236,12 @@ histogram_quantile(0.99,
 )
 ```

+> **Note** that the metric interval should be lower or equal to the control loop interval.
+
 ### Webhooks

 The canary analysis can be extended with webhooks.
-Flagger would call a URL (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.
+Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.

 Spec:

@@ -217,13 +250,21 @@ Spec:
 webhooks:
   - name: integration-tests
     url: http://podinfo.test:9898/echo
-    timeout: 1m
+    timeout: 30s
     metadata:
       test: "all"
       token: "16688eb5e9f289f1991c"
+  - name: load-tests
+    url: http://podinfo.test:9898/echo
+    timeout: 30s
+    metadata:
+      key1: "val1"
+      key2: "val2"
 ```

-Webhook payload:
+> **Note** that the sum of all webhooks timeouts should be lower than the control loop interval.
+
+Webhook payload (HTTP POST):

 ```json
 {
@@ -243,4 +284,78 @@

 On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.

+### Load Testing
+
+For workloads that are not receiving constant traffic Flagger can be configured with a webhook,
+that when called, will start a load test for the target workload.
+If the target workload doesn't receive any traffic during the canary analysis,
+Flagger metric checks will fail with "no values found for metric istio_requests_total".
+
+Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
+that generates traffic during analysis when configured as a webhook.
+
+(load testing diagram not preserved)
+
+First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
+
+```bash
+export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
+
+kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
+kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
+```
+
+Or by using Helm:
+
+```bash
+helm repo add flagger https://flagger.app
+
+helm upgrade -i flagger-loadtester flagger/loadtester \
+  --namespace=test \
+  --set cmd.logOutput=true \
+  --set cmd.timeout=1h
+```
+
+When deployed the load tester API will be available at `http://flagger-loadtester.test/`.
+
+Now you can add webhooks to the canary analysis spec:
+
+```yaml
+webhooks:
+  - name: load-test-get
+    url: http://flagger-loadtester.test/
+    timeout: 5s
+    metadata:
+      cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
+  - name: load-test-post
+    url: http://flagger-loadtester.test/
+    timeout: 5s
+    metadata:
+      cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
+```
+
+When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
+in the background, if they are not already running. This will ensure that during the
+analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.
+
+If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
+public URL and use HTTP2.
+
+```yaml
+webhooks:
+  - name: load-test-get
+    url: http://flagger-loadtester.test/
+    timeout: 5s
+    metadata:
+      cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
+```
+
+The load tester can run arbitrary commands as long as the binary is present in the container image.
+For example, if you want to replace `hey` with another CLI, you can create your own Docker image:
+
+```dockerfile
+FROM quay.io/stefanprodan/flagger-loadtester:<VER>
+
+RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
+    && chmod +x /usr/local/bin/my-cli
+```
docs/gitbook/install/install-flagger.md
@@ -1,10 +1,12 @@
 # Install Flagger

-Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled. If you are new to Istio you can follow this GKE guide [Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
+Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled.
+If you are new to Istio you can follow this GKE guide
+[Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).

 **Prerequisites**

-* Kubernetes >= 1.9
+* Kubernetes >= 1.11
 * Istio >= 1.0
 * Prometheus >= 2.6

@@ -21,8 +23,7 @@ Deploy Flagger in the _**istio-system**_ namespace:
 ```bash
 helm upgrade -i flagger flagger/flagger \
 --namespace=istio-system \
---set metricsServer=http://prometheus.istio-system:9090 \
---set controlLoopInterval=1m
+--set metricsServer=http://prometheus.istio-system:9090
 ```

 Enable **Slack** notifications:

@@ -61,11 +62,12 @@ helm delete --purge flagger

 The command removes all the Kubernetes components associated with the chart and deletes the release.

-{% hint style="info" %}
-On uninstall the Flagger CRD will not be removed. Deleting the CRD will make Kubernetes remove all the objects owned by the CRD like Istio virtual services, Kubernetes deployments and ClusterIP services.
-{% endhint %}
+> **Note** that on uninstall the Canary CRD will not be removed.
+Deleting the CRD will make Kubernetes remove all the objects owned by Flagger like Istio virtual services,
+Kubernetes deployments and ClusterIP services.

-If you want to remove all the objects created by Flagger you have delete the canary CRD with kubectl:
+If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:

 ```text
 kubectl delete crd canaries.flagger.app
docs/gitbook/install/install-istio.md
@@ -1,12 +1,14 @@
 # Install Istio

-This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and Let's Encrypt TLS for ingress gateway on Google Kubernetes Engine.
+This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
+Let's Encrypt TLS for ingress gateway on Google Kubernetes Engine.

 (diagram image not preserved)

 ### Prerequisites

-You will be creating a cluster on Google's Kubernetes Engine \(GKE\), if you don't have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
+You will be creating a cluster on Google's Kubernetes Engine \(GKE\),
+if you don't have an account you can sign up [here](https://cloud.google.com/free/) for free credits.

 Login into GCP, create a project and enable billing for it.

@@ -64,7 +66,9 @@ gcloud container clusters create istio \
   --scopes=gke-default,compute-rw,storage-rw
 ```

-The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\) preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced after a maximum of 24 hours.
+The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
+preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
+after a maximum of 24 hours.

 Set up credentials for `kubectl`:

@@ -330,10 +334,11 @@ kubectl create secret generic cert-manager-credentials \
   --namespace=istio-system
 ```

-Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and `my-gcp-project`with your project ID\):
+Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and
+`my-gcp-project` with your project ID\):

 ```yaml
-apiVersion: v1alpha2
+apiVersion: certmanager.k8s.io/v1alpha1
 kind: Issuer
 metadata:
   name: letsencrypt-prod
@@ -363,7 +368,7 @@ kubectl apply -f ./letsencrypt-issuer.yaml
 Create a wildcard certificate \(replace `example.com` with your domain\):

 ```yaml
-apiVersion: v1alpha2
+apiVersion: certmanager.k8s.io/v1alpha1
 kind: Certificate
 metadata:
   name: istio-gateway
@@ -403,7 +408,10 @@ Recreate Istio ingress gateway pods:
 kubectl -n istio-system delete pods -l istio=ingressgateway
 ```

-Note that Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal. Since the GKE cluster is made out of preemptible VMs the gateway pods will be replaced once every 24h, if your not using preemptible nodes then you need to manually kill the gateway pods every two months before the certificate expires.
+Note that Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal.
+Since the GKE cluster is made out of preemptible VMs the gateway pods will be replaced once every 24h;
+if you're not using preemptible nodes then you need to manually kill the gateway pods every two months
+before the certificate expires.

 ### Expose services outside the service mesh
docs/gitbook/usage/alerting.md
@@ -12,11 +12,13 @@ helm upgrade -i flagger flagger/flagger \
 --set slack.user=flagger
 ```

-Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
+Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
+has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.

 (screenshot image not preserved)

-A canary deployment will be rolled back if the progress deadline exceeded or if the analysis reached the maximum number of failed checks:
+A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the
+maximum number of failed checks:

 (screenshot image not preserved)
docs/gitbook/usage/monitoring.md
@@ -44,7 +44,8 @@ Promotion completed! podinfo.test

 ### Metrics

-Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
+Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
+the destination weight values:

 ```bash
 # Canaries total gauge
@@ -65,5 +66,4 @@ flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
 flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
 ```

-####
@@ -17,10 +17,17 @@ kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource \(replace example.com with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: v1alpha2
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
@@ -49,6 +56,8 @@ spec:
|
||||
hosts:
|
||||
- app.example.com
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
@@ -68,6 +77,13 @@ spec:
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# generate traffic during analysis
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
```

Save the above resource as podinfo-canary.yaml and then apply it:

@@ -97,7 +113,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.2.1
  podinfod=quay.io/stefanprodan/podinfo:1.4.0
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -106,9 +122,9 @@ Flagger detects that the deployment revision changed and starts a new rollout:
kubectl -n test describe canary/podinfo

Status:
  Canary Revision:  19871136
  Failed Checks:    0
  State:            finished
  Canary Weight:    0
  Failed Checks:    0
  Phase:            Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
@@ -130,6 +146,17 @@ Events:
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2019-01-16T14:05:07Z
prod        frontend  Succeeded     0        2019-01-15T16:15:07Z
prod        backend   Failed        0        2019-01-14T17:05:07Z
```

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Create a tester pod and exec into it:
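
The tester commands themselves fall outside this hunk; as an illustrative sketch (podinfo's `/status/{code}` and `/delay/{seconds}` endpoints are an assumption, not shown in this diff):

```bash
# Illustrative only; the endpoints are assumed, not part of this diff.
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898

# from inside the tester pod, generate HTTP 500 errors:
watch curl http://podinfo-canary:9898/status/500

# and high latency:
watch curl http://podinfo-canary:9898/delay/1
```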

@@ -160,9 +187,9 @@ When the number of failed checks reaches the canary analysis threshold, the traf
kubectl -n test describe canary/podinfo

Status:
  Canary Revision:  16695041
  Failed Checks:    10
  State:            failed
  Canary Weight:    0
  Failed Checks:    10
  Phase:            Failed
Events:
  Type     Reason  Age  From     Message
  ----     ------  ---  ----     -------
@@ -179,5 +206,3 @@ Events:
  Warning  Synced  1m   flagger  Canary failed! Scaling down podinfo.test
```

####

Binary file not shown.
@@ -1,96 +0,0 @@
apiVersion: v1
entries:
  flagger:
  - apiVersion: v1
    appVersion: 0.2.0
    created: 2019-01-04T13:38:42.239798+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 800b5fd1a0b2854ee8412b3170c36ecda3d382f209e18b475ee1d5e3c7fa2f83
    engine: gotpl
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.9.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.2.0.tgz
    version: 0.2.0
  - apiVersion: v1
    appVersion: 0.1.2
    created: 2019-01-04T13:38:42.239389+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 0029ef8dd20ebead3d84638eaa4b44d60b3e2bd953b4b7a1169963ce93a4e87c
    engine: gotpl
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.9.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.2.tgz
    version: 0.1.2
  - apiVersion: v1
    appVersion: 0.1.1
    created: 2019-01-04T13:38:42.238504+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 2bb8f72fcf63a5ba5ecbaa2ab0d0446f438ec93fbf3a598cd7de45e64d8f9628
    home: https://github.com/stefanprodan/flagger
    name: flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.1.tgz
    version: 0.1.1
  - apiVersion: v1
    appVersion: 0.1.0
    created: 2019-01-04T13:38:42.237702+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 03e05634149e13ddfddae6757266d65c271878a026c21c7d1429c16712bf3845
    home: https://github.com/stefanprodan/flagger
    name: flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.0.tgz
    version: 0.1.0
  grafana:
  - apiVersion: v1
    appVersion: 5.4.2
    created: 2019-01-04T13:38:42.24034+02:00
    description: Grafana dashboards for monitoring Flagger canary deployments
    digest: f94c0c2eaf7a7db7ef070575d280c37f93922c0e11ebdf203482c9f43603a1c9
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: grafana
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/grafana-0.1.0.tgz
    version: 0.1.0
generated: 2019-01-04T13:38:42.236727+02:00
@@ -23,6 +23,6 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge

${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
  github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
  flagger:v1alpha2 \
  flagger:v1alpha3 \
  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

@@ -16,6 +16,6 @@ limitations under the License.

// +k8s:deepcopy-gen=package

// Package v1alpha2 is the v1alpha2 version of the API.
// Package v1alpha3 is the v1alpha3 version of the API.
// +groupName=flagger.app
package v1alpha2
package v1alpha3
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2
package v1alpha3

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,7 +25,7 @@ import (
)

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha2"}
var SchemeGroupVersion = schema.GroupVersion{Group: rollout.GroupName, Version: "v1alpha3"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
@@ -14,16 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2
package v1alpha3

import (
    hpav1 "k8s.io/api/autoscaling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "time"
)

const (
    CanaryKind              = "Canary"
    ProgressDeadlineSeconds = 600
    AnalysisInterval        = 60 * time.Second
)

// +genclient
@@ -44,7 +46,8 @@ type CanarySpec struct {
    TargetRef hpav1.CrossVersionObjectReference `json:"targetRef"`

    // reference to autoscaling resource
    AutoscalerRef hpav1.CrossVersionObjectReference `json:"autoscalerRef"`
    // +optional
    AutoscalerRef *hpav1.CrossVersionObjectReference `json:"autoscalerRef,omitempty"`

    // virtual service spec
    Service CanaryService `json:"service"`
@@ -53,7 +56,7 @@ type CanarySpec struct {
    CanaryAnalysis CanaryAnalysis `json:"canaryAnalysis"`

    // the maximum time in seconds for a canary deployment to make progress
    // before it is considered to be failed. Defaults to 60s.
    // before it is considered to be failed. Defaults to ten minutes.
    ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}

@@ -67,21 +70,30 @@ type CanaryList struct {
    Items []Canary `json:"items"`
}

// CanaryState used for status state op
type CanaryState string
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string

const (
    CanaryRunning     CanaryState = "running"
    CanaryFinished    CanaryState = "finished"
    CanaryFailed      CanaryState = "failed"
    CanaryInitialized CanaryState = "initialized"
    // CanaryInitialized means the primary deployment, hpa and ClusterIP services
    // have been created along with the Istio virtual service
    CanaryInitialized CanaryPhase = "Initialized"
    // CanaryProgressing means the canary analysis is underway
    CanaryProgressing CanaryPhase = "Progressing"
    // CanarySucceeded means the canary analysis has been successful
    // and the canary deployment has been promoted
    CanarySucceeded CanaryPhase = "Succeeded"
    // CanaryFailed means the canary analysis failed
    // and the canary deployment has been scaled to zero
    CanaryFailed CanaryPhase = "Failed"
)

// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
    State          CanaryState `json:"state"`
    CanaryRevision string      `json:"canaryRevision"`
    FailedChecks   int         `json:"failedChecks"`
    Phase        CanaryPhase `json:"phase"`
    FailedChecks int         `json:"failedChecks"`
    CanaryWeight int         `json:"canaryWeight"`
    // +optional
    LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
    // +optional
    LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
@@ -96,6 +108,7 @@ type CanaryService struct {

// CanaryAnalysis is used to describe how the analysis should be done
type CanaryAnalysis struct {
    Interval   string `json:"interval"`
    Threshold  int    `json:"threshold"`
    MaxWeight  int    `json:"maxWeight"`
    StepWeight int    `json:"stepWeight"`
@@ -121,9 +134,9 @@ type CanaryWebhook struct {

// CanaryWebhookPayload holds the deployment info and metadata sent to webhooks
type CanaryWebhookPayload struct {
    Name      string             `json:"name"`
    Namespace string             `json:"namespace"`
    Metadata  *map[string]string `json:"metadata,omitempty"`
    Name      string            `json:"name"`
    Namespace string            `json:"namespace"`
    Metadata  map[string]string `json:"metadata,omitempty"`
}

// GetProgressDeadlineSeconds returns the progress deadline (default 600s)
@@ -134,3 +147,17 @@ func (c *Canary) GetProgressDeadlineSeconds() int {

    return ProgressDeadlineSeconds
}

// GetAnalysisInterval returns the canary analysis interval (default 60s)
func (c *Canary) GetAnalysisInterval() time.Duration {
    if c.Spec.CanaryAnalysis.Interval == "" {
        return AnalysisInterval
    }

    interval, err := time.ParseDuration(c.Spec.CanaryAnalysis.Interval)
    if err != nil {
        return AnalysisInterval
    }

    return interval
}
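
Given the json tags introduced above, the status block persisted on a canary object should render roughly like the sketch below when inspected with kubectl; the field names follow the struct tags, while the values are invented for illustration.

```bash
kubectl -n test get canary/podinfo -oyaml

# status:                                      # field names follow the json tags above;
#   phase: Succeeded                           # all values are invented for illustration
#   canaryWeight: 0
#   failedChecks: 0
#   lastAppliedSpec: <base64-encoded pod spec>
#   lastTransitionTime: "2019-01-16T14:05:07Z"
```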
@@ -18,9 +18,10 @@ limitations under the License.

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    v1 "k8s.io/api/autoscaling/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -159,7 +160,11 @@ func (in *CanaryService) DeepCopy() *CanaryService {
func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
    *out = *in
    out.TargetRef = in.TargetRef
    out.AutoscalerRef = in.AutoscalerRef
    if in.AutoscalerRef != nil {
        in, out := &in.AutoscalerRef, &out.AutoscalerRef
        *out = new(v1.CrossVersionObjectReference)
        **out = **in
    }
    in.Service.DeepCopyInto(&out.Service)
    in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
    if in.ProgressDeadlineSeconds != nil {
@@ -229,13 +234,9 @@ func (in *CanaryWebhookPayload) DeepCopyInto(out *CanaryWebhookPayload) {
    *out = *in
    if in.Metadata != nil {
        in, out := &in.Metadata, &out.Metadata
        *out = new(map[string]string)
        if **in != nil {
            in, out := *in, *out
            *out = make(map[string]string, len(*in))
            for key, val := range *in {
                (*out)[key] = val
            }
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    return
@@ -19,7 +19,7 @@ limitations under the License.
package versioned

import (
    flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
    discovery "k8s.io/client-go/discovery"
    rest "k8s.io/client-go/rest"
    flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -27,27 +27,27 @@ import (

type Interface interface {
    Discovery() discovery.DiscoveryInterface
    FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface
    FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface
    // Deprecated: please explicitly pick a version if possible.
    Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface
    Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface
}

// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
    *discovery.DiscoveryClient
    flaggerV1alpha2 *flaggerv1alpha2.FlaggerV1alpha2Client
    flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
}

// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
    return c.flaggerV1alpha2
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
    return c.flaggerV1alpha3
}

// Deprecated: Flagger retrieves the default version of FlaggerClient.
// Please explicitly pick a version.
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
    return c.flaggerV1alpha2
func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
    return c.flaggerV1alpha3
}

// Discovery retrieves the DiscoveryClient
@@ -66,7 +66,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
    }
    var cs Clientset
    var err error
    cs.flaggerV1alpha2, err = flaggerv1alpha2.NewForConfig(&configShallowCopy)
    cs.flaggerV1alpha3, err = flaggerv1alpha3.NewForConfig(&configShallowCopy)
    if err != nil {
        return nil, err
    }
@@ -82,7 +82,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
    var cs Clientset
    cs.flaggerV1alpha2 = flaggerv1alpha2.NewForConfigOrDie(c)
    cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)

    cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
    return &cs
@@ -91,7 +91,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
    var cs Clientset
    cs.flaggerV1alpha2 = flaggerv1alpha2.New(c)
    cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)

    cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
    return &cs

@@ -20,8 +20,8 @@ package fake

import (
    clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
    flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
    fakeflaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2/fake"
    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
    fakeflaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/discovery"
@@ -71,12 +71,12 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {

var _ clientset.Interface = &Clientset{}

// FlaggerV1alpha2 retrieves the FlaggerV1alpha2Client
func (c *Clientset) FlaggerV1alpha2() flaggerv1alpha2.FlaggerV1alpha2Interface {
    return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
// FlaggerV1alpha3 retrieves the FlaggerV1alpha3Client
func (c *Clientset) FlaggerV1alpha3() flaggerv1alpha3.FlaggerV1alpha3Interface {
    return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}

// Flagger retrieves the FlaggerV1alpha2Client
func (c *Clientset) Flagger() flaggerv1alpha2.FlaggerV1alpha2Interface {
    return &fakeflaggerv1alpha2.FakeFlaggerV1alpha2{Fake: &c.Fake}
// Flagger retrieves the FlaggerV1alpha3Client
func (c *Clientset) Flagger() flaggerv1alpha3.FlaggerV1alpha3Interface {
    return &fakeflaggerv1alpha3.FakeFlaggerV1alpha3{Fake: &c.Fake}
}

@@ -19,7 +19,7 @@ limitations under the License.
package fake

import (
    flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    flaggerv1alpha2.AddToScheme(scheme)
    flaggerv1alpha3.AddToScheme(scheme)
}

@@ -19,7 +19,7 @@ limitations under the License.
package scheme

import (
    flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -50,5 +50,5 @@ func init() {
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    flaggerv1alpha2.AddToScheme(scheme)
    flaggerv1alpha3.AddToScheme(scheme)
}

@@ -16,10 +16,10 @@ limitations under the License.

// Code generated by client-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    scheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    types "k8s.io/apimachinery/pkg/types"
@@ -35,15 +35,15 @@ type CanariesGetter interface {

// CanaryInterface has methods to work with Canary resources.
type CanaryInterface interface {
    Create(*v1alpha2.Canary) (*v1alpha2.Canary, error)
    Update(*v1alpha2.Canary) (*v1alpha2.Canary, error)
    UpdateStatus(*v1alpha2.Canary) (*v1alpha2.Canary, error)
    Create(*v1alpha3.Canary) (*v1alpha3.Canary, error)
    Update(*v1alpha3.Canary) (*v1alpha3.Canary, error)
    UpdateStatus(*v1alpha3.Canary) (*v1alpha3.Canary, error)
    Delete(name string, options *v1.DeleteOptions) error
    DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
    Get(name string, options v1.GetOptions) (*v1alpha2.Canary, error)
    List(opts v1.ListOptions) (*v1alpha2.CanaryList, error)
    Get(name string, options v1.GetOptions) (*v1alpha3.Canary, error)
    List(opts v1.ListOptions) (*v1alpha3.CanaryList, error)
    Watch(opts v1.ListOptions) (watch.Interface, error)
    Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error)
    Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error)
    CanaryExpansion
}

@@ -54,7 +54,7 @@ type canaries struct {
}

// newCanaries returns a Canaries
func newCanaries(c *FlaggerV1alpha2Client, namespace string) *canaries {
func newCanaries(c *FlaggerV1alpha3Client, namespace string) *canaries {
    return &canaries{
        client: c.RESTClient(),
        ns:     namespace,
@@ -62,8 +62,8 @@ func newCanaries(c *FlaggerV1alpha2Client, namespace string) *canaries {
}

// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
    result = &v1alpha2.Canary{}
func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Canary, err error) {
    result = &v1alpha3.Canary{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("canaries").
@@ -75,8 +75,8 @@ func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Can
}

// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
    result = &v1alpha2.CanaryList{}
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
    result = &v1alpha3.CanaryList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("canaries").
@@ -97,8 +97,8 @@ func (c *canaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}

// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
    result = &v1alpha2.Canary{}
func (c *canaries) Create(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
    result = &v1alpha3.Canary{}
    err = c.client.Post().
        Namespace(c.ns).
        Resource("canaries").
@@ -109,8 +109,8 @@ func (c *canaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err
}

// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *canaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
    result = &v1alpha2.Canary{}
func (c *canaries) Update(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
    result = &v1alpha3.Canary{}
    err = c.client.Put().
        Namespace(c.ns).
        Resource("canaries").
@@ -124,8 +124,8 @@ func (c *canaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().

func (c *canaries) UpdateStatus(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
    result = &v1alpha2.Canary{}
func (c *canaries) UpdateStatus(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
    result = &v1alpha3.Canary{}
    err = c.client.Put().
        Namespace(c.ns).
        Resource("canaries").
@@ -160,8 +160,8 @@ func (c *canaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
}

// Patch applies the patch and returns the patched canary.
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
    result = &v1alpha2.Canary{}
func (c *canaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
    result = &v1alpha3.Canary{}
    err = c.client.Patch(pt).
        Namespace(c.ns).
        Resource("canaries").
@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.

// This package has the automatically generated typed clients.
package v1alpha2
package v1alpha3
@@ -19,7 +19,7 @@ limitations under the License.
package fake

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    labels "k8s.io/apimachinery/pkg/labels"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -30,29 +30,29 @@ import (

// FakeCanaries implements CanaryInterface
type FakeCanaries struct {
    Fake *FakeFlaggerV1alpha2
    Fake *FakeFlaggerV1alpha3
    ns   string
}

var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha2", Resource: "canaries"}
var canariesResource = schema.GroupVersionResource{Group: "flagger.app", Version: "v1alpha3", Resource: "canaries"}

var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha2", Kind: "Canary"}
var canariesKind = schema.GroupVersionKind{Group: "flagger.app", Version: "v1alpha3", Kind: "Canary"}

// Get takes name of the canary, and returns the corresponding canary object, and an error if there is any.
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Canary, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
        Invokes(testing.NewGetAction(canariesResource, c.ns, name), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
    }
    return obj.(*v1alpha2.Canary), err
    return obj.(*v1alpha3.Canary), err
}

// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, err error) {
func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha2.CanaryList{})
        Invokes(testing.NewListAction(canariesResource, canariesKind, c.ns, opts), &v1alpha3.CanaryList{})

    if obj == nil {
        return nil, err
@@ -62,8 +62,8 @@ func (c *FakeCanaries) List(opts v1.ListOptions) (result *v1alpha2.CanaryList, e
    if label == nil {
        label = labels.Everything()
    }
    list := &v1alpha2.CanaryList{ListMeta: obj.(*v1alpha2.CanaryList).ListMeta}
    for _, item := range obj.(*v1alpha2.CanaryList).Items {
    list := &v1alpha3.CanaryList{ListMeta: obj.(*v1alpha3.CanaryList).ListMeta}
    for _, item := range obj.(*v1alpha3.CanaryList).Items {
        if label.Matches(labels.Set(item.Labels)) {
            list.Items = append(list.Items, item)
        }
@@ -79,43 +79,43 @@ func (c *FakeCanaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
}

// Create takes the representation of a canary and creates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Create(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Create(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
        Invokes(testing.NewCreateAction(canariesResource, c.ns, canary), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
    }
    return obj.(*v1alpha2.Canary), err
    return obj.(*v1alpha3.Canary), err
}

// Update takes the representation of a canary and updates it. Returns the server's representation of the canary, and an error, if there is any.
func (c *FakeCanaries) Update(canary *v1alpha2.Canary) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Update(canary *v1alpha3.Canary) (result *v1alpha3.Canary, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha2.Canary{})
        Invokes(testing.NewUpdateAction(canariesResource, c.ns, canary), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
    }
    return obj.(*v1alpha2.Canary), err
    return obj.(*v1alpha3.Canary), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeCanaries) UpdateStatus(canary *v1alpha2.Canary) (*v1alpha2.Canary, error) {
func (c *FakeCanaries) UpdateStatus(canary *v1alpha3.Canary) (*v1alpha3.Canary, error) {
    obj, err := c.Fake.
        Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha2.Canary{})
        Invokes(testing.NewUpdateSubresourceAction(canariesResource, "status", c.ns, canary), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
    }
    return obj.(*v1alpha2.Canary), err
    return obj.(*v1alpha3.Canary), err
}

// Delete takes name of the canary and deletes it. Returns an error if one occurs.
func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
    _, err := c.Fake.
        Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha2.Canary{})
        Invokes(testing.NewDeleteAction(canariesResource, c.ns, name), &v1alpha3.Canary{})

    return err
}
@@ -124,17 +124,17 @@ func (c *FakeCanaries) Delete(name string, options *v1.DeleteOptions) error {
func (c *FakeCanaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    action := testing.NewDeleteCollectionAction(canariesResource, c.ns, listOptions)

    _, err := c.Fake.Invokes(action, &v1alpha2.CanaryList{})
    _, err := c.Fake.Invokes(action, &v1alpha3.CanaryList{})
    return err
}

// Patch applies the patch and returns the patched canary.
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Canary, err error) {
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha2.Canary{})
        Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha3.Canary{})

    if obj == nil {
        return nil, err
    }
    return obj.(*v1alpha2.Canary), err
    return obj.(*v1alpha3.Canary), err
}
@@ -19,22 +19,22 @@ limitations under the License.
package fake

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
    rest "k8s.io/client-go/rest"
    testing "k8s.io/client-go/testing"
)

type FakeFlaggerV1alpha2 struct {
type FakeFlaggerV1alpha3 struct {
    *testing.Fake
}

func (c *FakeFlaggerV1alpha2) Canaries(namespace string) v1alpha2.CanaryInterface {
func (c *FakeFlaggerV1alpha3) Canaries(namespace string) v1alpha3.CanaryInterface {
    return &FakeCanaries{c, namespace}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeFlaggerV1alpha2) RESTClient() rest.Interface {
func (c *FakeFlaggerV1alpha3) RESTClient() rest.Interface {
    var ret *rest.RESTClient
    return ret
}
@@ -16,31 +16,31 @@ limitations under the License.

// Code generated by client-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    rest "k8s.io/client-go/rest"
)

type FlaggerV1alpha2Interface interface {
type FlaggerV1alpha3Interface interface {
    RESTClient() rest.Interface
    CanariesGetter
}

// FlaggerV1alpha2Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha2Client struct {
// FlaggerV1alpha3Client is used to interact with features provided by the flagger.app group.
type FlaggerV1alpha3Client struct {
    restClient rest.Interface
}

func (c *FlaggerV1alpha2Client) Canaries(namespace string) CanaryInterface {
func (c *FlaggerV1alpha3Client) Canaries(namespace string) CanaryInterface {
    return newCanaries(c, namespace)
}

// NewForConfig creates a new FlaggerV1alpha2Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha2Client, error) {
// NewForConfig creates a new FlaggerV1alpha3Client for the given config.
func NewForConfig(c *rest.Config) (*FlaggerV1alpha3Client, error) {
    config := *c
    if err := setConfigDefaults(&config); err != nil {
        return nil, err
@@ -49,12 +49,12 @@ func NewForConfig(c *rest.Config) (*FlaggerV1alpha2Client, error) {
    if err != nil {
        return nil, err
    }
    return &FlaggerV1alpha2Client{client}, nil
    return &FlaggerV1alpha3Client{client}, nil
}

// NewForConfigOrDie creates a new FlaggerV1alpha2Client for the given config and
// NewForConfigOrDie creates a new FlaggerV1alpha3Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha2Client {
func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha3Client {
    client, err := NewForConfig(c)
    if err != nil {
        panic(err)
@@ -62,13 +62,13 @@ func NewForConfigOrDie(c *rest.Config) *FlaggerV1alpha2Client {
    return client
}

// New creates a new FlaggerV1alpha2Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha2Client {
    return &FlaggerV1alpha2Client{c}
// New creates a new FlaggerV1alpha3Client for the given RESTClient.
func New(c rest.Interface) *FlaggerV1alpha3Client {
    return &FlaggerV1alpha3Client{c}
}

func setConfigDefaults(config *rest.Config) error {
    gv := v1alpha2.SchemeGroupVersion
    gv := v1alpha3.SchemeGroupVersion
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
@@ -82,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error {

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FlaggerV1alpha2Client) RESTClient() rest.Interface {
func (c *FlaggerV1alpha3Client) RESTClient() rest.Interface {
    if c == nil {
        return nil
    }
@@ -16,6 +16,6 @@ limitations under the License.

// Code generated by client-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

type CanaryExpansion interface{}
@@ -19,14 +19,14 @@ limitations under the License.
package flagger

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
    internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
)

// Interface provides access to each of this group's versions.
type Interface interface {
    // V1alpha2 provides access to shared informers for resources in V1alpha2.
    V1alpha2() v1alpha2.Interface
    // V1alpha3 provides access to shared informers for resources in V1alpha3.
    V1alpha3() v1alpha3.Interface
}

type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
    return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1alpha2 returns a new v1alpha2.Interface.
func (g *group) V1alpha2() v1alpha2.Interface {
    return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions)
// V1alpha3 returns a new v1alpha3.Interface.
func (g *group) V1alpha3() v1alpha3.Interface {
    return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions)
}

@@ -16,15 +16,15 @@ limitations under the License.

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    time "time"

    flaggerv1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    flaggerv1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    versioned "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
    internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
    v1alpha2 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
    watch "k8s.io/apimachinery/pkg/watch"
@@ -35,7 +35,7 @@ import (
// Canaries.
type CanaryInformer interface {
    Informer() cache.SharedIndexInformer
    Lister() v1alpha2.CanaryLister
    Lister() v1alpha3.CanaryLister
}

type canaryInformer struct {
@@ -61,16 +61,16 @@ func NewFilteredCanaryInformer(client versioned.Interface, namespace string, res
                if tweakListOptions != nil {
                    tweakListOptions(&options)
                }
                return client.FlaggerV1alpha2().Canaries(namespace).List(options)
                return client.FlaggerV1alpha3().Canaries(namespace).List(options)
            },
            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                if tweakListOptions != nil {
                    tweakListOptions(&options)
                }
                return client.FlaggerV1alpha2().Canaries(namespace).Watch(options)
                return client.FlaggerV1alpha3().Canaries(namespace).Watch(options)
            },
        },
        &flaggerv1alpha2.Canary{},
        &flaggerv1alpha3.Canary{},
        resyncPeriod,
        indexers,
    )
@@ -81,9 +81,9 @@ func (f *canaryInformer) defaultInformer(client versioned.Interface, resyncPerio
}

func (f *canaryInformer) Informer() cache.SharedIndexInformer {
    return f.factory.InformerFor(&flaggerv1alpha2.Canary{}, f.defaultInformer)
    return f.factory.InformerFor(&flaggerv1alpha3.Canary{}, f.defaultInformer)
}

func (f *canaryInformer) Lister() v1alpha2.CanaryLister {
    return v1alpha2.NewCanaryLister(f.Informer().GetIndexer())
func (f *canaryInformer) Lister() v1alpha3.CanaryLister {
    return v1alpha3.NewCanaryLister(f.Informer().GetIndexer())
}
@@ -16,7 +16,7 @@ limitations under the License.

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    internalinterfaces "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/internalinterfaces"
@@ -21,7 +21,7 @@ package externalversions
import (
    "fmt"

    v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    cache "k8s.io/client-go/tools/cache"
)
@@ -52,9 +52,9 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
    switch resource {
    // Group=flagger.app, Version=v1alpha2
    case v1alpha2.SchemeGroupVersion.WithResource("canaries"):
        return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha2().Canaries().Informer()}, nil
    // Group=flagger.app, Version=v1alpha3
    case v1alpha3.SchemeGroupVersion.WithResource("canaries"):
        return &genericInformer{resource: resource.GroupResource(), informer: f.Flagger().V1alpha3().Canaries().Informer()}, nil

    }

@@ -16,10 +16,10 @@ limitations under the License.

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

import (
    v1alpha2 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    v1alpha3 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/tools/cache"
@@ -28,7 +28,7 @@ import (
// CanaryLister helps list Canaries.
type CanaryLister interface {
    // List lists all Canaries in the indexer.
    List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
    List(selector labels.Selector) (ret []*v1alpha3.Canary, err error)
    // Canaries returns an object that can list and get Canaries.
    Canaries(namespace string) CanaryNamespaceLister
    CanaryListerExpansion
@@ -45,9 +45,9 @@ func NewCanaryLister(indexer cache.Indexer) CanaryLister {
}

// List lists all Canaries in the indexer.
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
func (s *canaryLister) List(selector labels.Selector) (ret []*v1alpha3.Canary, err error) {
    err = cache.ListAll(s.indexer, selector, func(m interface{}) {
        ret = append(ret, m.(*v1alpha2.Canary))
        ret = append(ret, m.(*v1alpha3.Canary))
    })
    return ret, err
}
@@ -60,9 +60,9 @@ func (s *canaryLister) Canaries(namespace string) CanaryNamespaceLister {
// CanaryNamespaceLister helps list and get Canaries.
type CanaryNamespaceLister interface {
    // List lists all Canaries in the indexer for a given namespace.
    List(selector labels.Selector) (ret []*v1alpha2.Canary, err error)
    List(selector labels.Selector) (ret []*v1alpha3.Canary, err error)
    // Get retrieves the Canary from the indexer for a given namespace and name.
    Get(name string) (*v1alpha2.Canary, error)
    Get(name string) (*v1alpha3.Canary, error)
    CanaryNamespaceListerExpansion
}

@@ -74,21 +74,21 @@ type canaryNamespaceLister struct {
}

// List lists all Canaries in the indexer for a given namespace.
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Canary, err error) {
func (s canaryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Canary, err error) {
    err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
        ret = append(ret, m.(*v1alpha2.Canary))
        ret = append(ret, m.(*v1alpha3.Canary))
    })
    return ret, err
}

// Get retrieves the Canary from the indexer for a given namespace and name.
func (s canaryNamespaceLister) Get(name string) (*v1alpha2.Canary, error) {
func (s canaryNamespaceLister) Get(name string) (*v1alpha3.Canary, error) {
    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
    if err != nil {
        return nil, err
    }
    if !exists {
        return nil, errors.NewNotFound(v1alpha2.Resource("canary"), name)
        return nil, errors.NewNotFound(v1alpha3.Resource("canary"), name)
    }
    return obj.(*v1alpha2.Canary), nil
    return obj.(*v1alpha3.Canary), nil
}
@@ -16,7 +16,7 @@ limitations under the License.

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha2
package v1alpha3

// CanaryListerExpansion allows custom methods to be added to
// CanaryLister.
@@ -7,11 +7,11 @@ import (

    "github.com/google/go-cmp/cmp"
    istioclientset "github.com/knative/pkg/client/clientset/versioned"
    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
    clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
    flaggerscheme "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/scheme"
    flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha2"
    flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha2"
    flaggerinformers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
    flaggerlisters "github.com/stefanprodan/flagger/pkg/client/listers/flagger/v1alpha3"
    "github.com/stefanprodan/flagger/pkg/notifier"
    "go.uber.org/zap"
    corev1 "k8s.io/api/core/v1"
@@ -40,6 +40,7 @@ type Controller struct {
    eventRecorder record.EventRecorder
    logger        *zap.SugaredLogger
    canaries      *sync.Map
    jobs          map[string]CanaryJob
    deployer      CanaryDeployer
    router        CanaryRouter
    observer      CanaryObserver
@@ -98,6 +99,7 @@ func NewController(
    eventRecorder: eventRecorder,
    logger:        logger,
    canaries:      new(sync.Map),
    jobs:          map[string]CanaryJob{},
    flaggerWindow: flaggerWindow,
    deployer:      deployer,
    router:        router,
@@ -246,17 +248,17 @@ func checkCustomResourceType(obj interface{}, logger *zap.SugaredLogger) (flagge
}

func (c *Controller) recordEventInfof(r *flaggerv1.Canary, template string, args ...interface{}) {
    c.logger.Infof(template, args...)
    c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
    c.eventRecorder.Event(r, corev1.EventTypeNormal, "Synced", fmt.Sprintf(template, args...))
}

func (c *Controller) recordEventErrorf(r *flaggerv1.Canary, template string, args ...interface{}) {
    c.logger.Errorf(template, args...)
    c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Errorf(template, args...)
    c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}

func (c *Controller) recordEventWarningf(r *flaggerv1.Canary, template string, args ...interface{}) {
    c.logger.Infof(template, args...)
    c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).Infof(template, args...)
    c.eventRecorder.Event(r, corev1.EventTypeWarning, "Synced", fmt.Sprintf(template, args...))
}

@@ -9,7 +9,7 @@ import (
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
istioclientset "github.com/knative/pkg/client/clientset/versioned"
|
||||
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
|
||||
flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
|
||||
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
|
||||
"go.uber.org/zap"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
@@ -32,15 +32,17 @@ type CanaryDeployer struct {
|
||||
|
||||
// Promote copies the pod spec from canary to primary
|
||||
func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
primaryName := fmt.Sprintf("%s-primary", targetName)
|
||||
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
|
||||
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
@@ -49,15 +51,17 @@ func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
|
||||
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
primary.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
|
||||
primary.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
|
||||
primary.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
|
||||
primary.Spec.Strategy = canary.Spec.Strategy
|
||||
primary.Spec.Template.Spec = canary.Spec.Template.Spec
|
||||
_, err = c.kubeClient.AppsV1().Deployments(primary.Namespace).Update(primary)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating template spec %s.%s failed: %v", primary.GetName(), primary.Namespace, err)
|
||||
primaryCopy := primary.DeepCopy()
|
||||
primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
|
||||
primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
|
||||
primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
|
||||
primaryCopy.Spec.Strategy = canary.Spec.Strategy
|
||||
primaryCopy.Spec.Template.Spec = canary.Spec.Template.Spec
|
||||
|
||||
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
|
||||
primaryCopy.GetName(), primaryCopy.Namespace, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -78,15 +82,11 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
|
||||
retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
|
||||
if err != nil {
|
||||
if retriable {
|
||||
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
|
||||
} else {
|
||||
return retriable, err
|
||||
}
|
||||
return retriable, fmt.Errorf("Halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error())
|
||||
}
|
||||
|
||||
if primary.Spec.Replicas == int32p(0) {
|
||||
return true, fmt.Errorf("halt %s.%s advancement primary deployment is scaled to zero",
|
||||
return true, fmt.Errorf("Halt %s.%s advancement primary deployment is scaled to zero",
|
||||
cd.Name, cd.Namespace)
|
||||
}
|
||||
return true, nil
|
||||
@@ -96,18 +96,19 @@ func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
// the deployment is in the middle of a rolling update or if the pods are unhealthy
|
||||
// it will return a non retriable error if the rolling update is stuck
|
||||
func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return true, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
return true, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
|
||||
if err != nil {
|
||||
if retriable {
|
||||
return retriable, fmt.Errorf("Halt %s.%s advancement %s", cd.Name, cd.Namespace, err.Error())
|
||||
return retriable, fmt.Errorf("Halt advancement %s.%s %s", targetName, cd.Namespace, err.Error())
|
||||
} else {
|
||||
return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
|
||||
cd.GetProgressDeadlineSeconds())
|
||||
@@ -119,22 +120,23 @@ func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
|
||||
// IsNewSpec returns true if the canary deployment pod spec has changed
|
||||
func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return false, fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
if cd.Status.CanaryRevision == "" {
|
||||
if cd.Status.LastAppliedSpec == "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
newSpec := &canary.Spec.Template.Spec
|
||||
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.CanaryRevision)
|
||||
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
|
||||
}
|
||||
oldSpec := &corev1.PodSpec{}
|
||||
err = json.Unmarshal(oldSpecJson, oldSpec)
|
||||
@@ -150,31 +152,60 @@ func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
     return false, nil
 }

-// SetFailedChecks updates the canary failed checks counter
-func (c *CanaryDeployer) SetFailedChecks(cd *flaggerv1.Canary, val int) error {
-    cd.Status.FailedChecks = val
-    cd.Status.LastTransitionTime = metav1.Now()
-    cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
+// ShouldAdvance determines if the canary analysis can proceed
+func (c *CanaryDeployer) ShouldAdvance(cd *flaggerv1.Canary) (bool, error) {
+    if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
+        return true, nil
+    }
+    return c.IsNewSpec(cd)
+}
+
+// SetStatusFailedChecks updates the canary failed checks counter
+func (c *CanaryDeployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
+    cdCopy := cd.DeepCopy()
+    cdCopy.Status.FailedChecks = val
+    cdCopy.Status.LastTransitionTime = metav1.Now()
+
+    cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
     if err != nil {
-        return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
+        return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
     }
     return nil
 }

-// SetState updates the canary status state
-func (c *CanaryDeployer) SetState(cd *flaggerv1.Canary, state flaggerv1.CanaryState) error {
-    cd.Status.State = state
-    cd.Status.LastTransitionTime = metav1.Now()
-    cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
+// SetStatusWeight updates the canary status weight value
+func (c *CanaryDeployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
+    cdCopy := cd.DeepCopy()
+    cdCopy.Status.CanaryWeight = val
+    cdCopy.Status.LastTransitionTime = metav1.Now()
+
+    cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
     if err != nil {
-        return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
+        return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
     }
     return nil
 }

+// SetStatusPhase updates the canary status phase
+func (c *CanaryDeployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
+    cdCopy := cd.DeepCopy()
+    cdCopy.Status.Phase = phase
+    cdCopy.Status.LastTransitionTime = metav1.Now()
+
+    if phase != flaggerv1.CanaryProgressing {
+        cdCopy.Status.CanaryWeight = 0
+    }
+
+    cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
+    if err != nil {
+        return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
+    }
+    return nil
+}
+
 // SyncStatus encodes the canary pod spec and updates the canary status
 func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
-    canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
+    dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
             return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
@@ -182,36 +213,42 @@ func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.Canar
         return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
     }

-    specJson, err := json.Marshal(canary.Spec.Template.Spec)
+    specJson, err := json.Marshal(dep.Spec.Template.Spec)
     if err != nil {
         return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
     }

-    specEnc := base64.StdEncoding.EncodeToString(specJson)
-    cd.Status.State = status.State
-    cd.Status.FailedChecks = status.FailedChecks
-    cd.Status.CanaryRevision = specEnc
-    cd.Status.LastTransitionTime = metav1.Now()
-    cd, err = c.flaggerClient.FlaggerV1alpha2().Canaries(cd.Namespace).Update(cd)
+    cdCopy := cd.DeepCopy()
+    cdCopy.Status.Phase = status.Phase
+    cdCopy.Status.CanaryWeight = status.CanaryWeight
+    cdCopy.Status.FailedChecks = status.FailedChecks
+    cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
+    cdCopy.Status.LastTransitionTime = metav1.Now()
+
+    cd, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
     if err != nil {
-        return fmt.Errorf("deployment %s.%s update error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
+        return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
     }
     return nil
 }

 // Scale sets the canary deployment replicas
 func (c *CanaryDeployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
-    canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
+    targetName := cd.Spec.TargetRef.Name
+    dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
-            return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
+            return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
         }
-        return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
+        return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
     }
-    canary.Spec.Replicas = int32p(replicas)
-    canary, err = c.kubeClient.AppsV1().Deployments(canary.Namespace).Update(canary)
+
+    depCopy := dep.DeepCopy()
+    depCopy.Spec.Replicas = int32p(replicas)
+
+    _, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
     if err != nil {
-        return fmt.Errorf("scaling %s.%s to %v failed: %v", canary.GetName(), canary.Namespace, replicas, err)
+        return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
     }
     return nil
 }
@@ -224,14 +261,14 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
         return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
     }

-    if cd.Status.State == "" {
-        c.logger.Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
+    if cd.Status.Phase == "" {
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
         if err := c.Scale(cd, 0); err != nil {
             return err
         }
     }

-    if cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
+    if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
         if err := c.createPrimaryHpa(cd); err != nil {
             return fmt.Errorf("creating hpa %s.%s failed: %v", primaryName, cd.Namespace, err)
         }
@@ -240,13 +277,13 @@ func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
 }

 func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
-    canaryName := cd.Spec.TargetRef.Name
+    targetName := cd.Spec.TargetRef.Name
     primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)

-    canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(canaryName, metav1.GetOptions{})
+    canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
-            return fmt.Errorf("deployment %s.%s not found, retrying", canaryName, cd.Namespace)
+            return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
         }
         return err
     }
@@ -292,7 +329,7 @@ func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
         return err
     }

-        c.logger.Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
     }

     return nil
@@ -340,7 +377,7 @@ func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
         if err != nil {
             return err
         }
-        c.logger.Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
     }

     return nil
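The change repeated across all of the status writers above is the same client-go hygiene fix: objects handed out by the informer cache must never be mutated in place, so every writer now works on a DeepCopy and persists it through UpdateStatus instead of Update. A condensed sketch of the pattern, using the same names as the diff:

// cd comes from the informer cache; mutating it directly would corrupt the
// shared store, so all writers follow this shape after the refactoring
func setStatusPhase(c *CanaryDeployer, cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
    cdCopy := cd.DeepCopy() // work on a private copy
    cdCopy.Status.Phase = phase
    cdCopy.Status.LastTransitionTime = metav1.Now()

    // UpdateStatus touches only the status subresource of the Canary CRD
    _, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
    return err
}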
@@ -3,7 +3,7 @@ package controller
 import (
     "testing"

-    "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
     fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
     "github.com/stefanprodan/flagger/pkg/logging"
     appsv1 "k8s.io/api/apps/v1"
@@ -14,30 +14,30 @@ import (
     "k8s.io/client-go/kubernetes/fake"
 )

-func newTestCanary() *v1alpha2.Canary {
-    cd := &v1alpha2.Canary{
-        TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()},
+func newTestCanary() *v1alpha3.Canary {
+    cd := &v1alpha3.Canary{
+        TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
         ObjectMeta: metav1.ObjectMeta{
             Namespace: "default",
             Name:      "podinfo",
         },
-        Spec: v1alpha2.CanarySpec{
+        Spec: v1alpha3.CanarySpec{
             TargetRef: hpav1.CrossVersionObjectReference{
                 Name:       "podinfo",
                 APIVersion: "apps/v1",
                 Kind:       "Deployment",
             },
-            AutoscalerRef: hpav1.CrossVersionObjectReference{
+            AutoscalerRef: &hpav1.CrossVersionObjectReference{
                 Name:       "podinfo",
                 APIVersion: "autoscaling/v2beta1",
                 Kind:       "HorizontalPodAutoscaler",
-            }, Service: v1alpha2.CanaryService{
+            }, Service: v1alpha3.CanaryService{
                 Port: 9898,
-            }, CanaryAnalysis: v1alpha2.CanaryAnalysis{
+            }, CanaryAnalysis: v1alpha3.CanaryAnalysis{
                 Threshold:  10,
                 StepWeight: 10,
                 MaxWeight:  50,
-                Metrics: []v1alpha2.CanaryMetric{
+                Metrics: []v1alpha3.CanaryMetric{
                     {
                         Name:      "istio_requests_total",
                         Threshold: 99,
@@ -351,12 +351,12 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
         t.Fatal(err.Error())
     }

-    err = deployer.SetFailedChecks(canary, 1)
+    err = deployer.SetStatusFailedChecks(canary, 1)
     if err != nil {
         t.Fatal(err.Error())
     }

-    res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
+    res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
     if err != nil {
         t.Fatal(err.Error())
     }
@@ -387,18 +387,18 @@ func TestCanaryDeployer_SetState(t *testing.T) {
         t.Fatal(err.Error())
     }

-    err = deployer.SetState(canary, v1alpha2.CanaryRunning)
+    err = deployer.SetStatusPhase(canary, v1alpha3.CanaryProgressing)
     if err != nil {
         t.Fatal(err.Error())
     }

-    res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
+    res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
     if err != nil {
         t.Fatal(err.Error())
     }

-    if res.Status.State != v1alpha2.CanaryRunning {
-        t.Errorf("Got %v wanted %v", res.Status.State, v1alpha2.CanaryRunning)
+    if res.Status.Phase != v1alpha3.CanaryProgressing {
+        t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryProgressing)
     }
 }

@@ -423,8 +423,8 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
         t.Fatal(err.Error())
     }

-    status := v1alpha2.CanaryStatus{
-        State: v1alpha2.CanaryRunning,
+    status := v1alpha3.CanaryStatus{
+        Phase: v1alpha3.CanaryProgressing,
         FailedChecks: 2,
     }
     err = deployer.SyncStatus(canary, status)
@@ -432,13 +432,13 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
         t.Fatal(err.Error())
     }

-    res, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
+    res, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
     if err != nil {
         t.Fatal(err.Error())
     }

-    if res.Status.State != status.State {
-        t.Errorf("Got state %v wanted %v", res.Status.State, status.State)
+    if res.Status.Phase != status.Phase {
+        t.Errorf("Got state %v wanted %v", res.Status.Phase, status.Phase)
     }

     if res.Status.FailedChecks != status.FailedChecks {
pkg/controller/job.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package controller
+
+import "time"
+
+// CanaryJob holds the reference to a canary deployment schedule
+type CanaryJob struct {
+    Name      string
+    Namespace string
+    SkipTests bool
+    function  func(name string, namespace string, skipTests bool)
+    done      chan bool
+    ticker    *time.Ticker
+}
+
+// Start runs the canary analysis on a schedule
+func (j CanaryJob) Start() {
+    go func() {
+        // run the infra bootstrap on job creation
+        j.function(j.Name, j.Namespace, j.SkipTests)
+        for {
+            select {
+            case <-j.ticker.C:
+                j.function(j.Name, j.Namespace, j.SkipTests)
+            case <-j.done:
+                return
+            }
+        }
+    }()
+}
+
+// Stop closes the job channel and stops the ticker
+func (j CanaryJob) Stop() {
+    close(j.done)
+    j.ticker.Stop()
+}
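CanaryJob replaces the previous fire-and-forget goroutine per reconcile pass: each canary now gets its own ticker-driven loop that runs the analysis once immediately and then once per interval until Stop is called. A hypothetical standalone usage, with a stub in place of the controller's advanceCanary:

job := CanaryJob{
    Name:      "podinfo",
    Namespace: "default",
    function: func(name string, namespace string, skipTests bool) {
        // the scheduler wires this to c.advanceCanary
    },
    done:   make(chan bool),
    ticker: time.NewTicker(10 * time.Second),
}
job.Start() // runs once now, then every 10s
// ...
job.Stop() // closes done and stops the ticker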
@@ -5,7 +5,7 @@ import (
     "time"

     "github.com/prometheus/client_golang/prometheus"
-    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
 )

 // CanaryRecorder records the canary analysis as Prometheus metrics
@@ -72,8 +72,8 @@ func (cr *CanaryRecorder) SetTotal(namespace string, total int) {
 // SetStatus sets the last known canary analysis status
 func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary) {
     status := 1
-    switch cd.Status.State {
-    case flaggerv1.CanaryRunning:
+    switch cd.Status.Phase {
+    case flaggerv1.CanaryProgressing:
         status = 0
     case flaggerv1.CanaryFailed:
         status = 2
@@ -5,7 +5,7 @@ import (

     istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
     istioclientset "github.com/knative/pkg/client/clientset/versioned"
-    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
     clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
     "go.uber.org/zap"
     corev1 "k8s.io/api/core/v1"
@@ -42,13 +42,13 @@ func (c *CanaryRouter) Sync(cd *flaggerv1.Canary) error {
 }

 func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
-    canaryName := cd.Spec.TargetRef.Name
-    primaryName := fmt.Sprintf("%s-primary", canaryName)
-    canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(canaryName, metav1.GetOptions{})
+    targetName := cd.Spec.TargetRef.Name
+    primaryName := fmt.Sprintf("%s-primary", targetName)
+    canaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(targetName, metav1.GetOptions{})
     if errors.IsNotFound(err) {
         canaryService = &corev1.Service{
             ObjectMeta: metav1.ObjectMeta{
-                Name:      canaryName,
+                Name:      targetName,
                 Namespace: cd.Namespace,
                 OwnerReferences: []metav1.OwnerReference{
                     *metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -60,7 +60,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
             },
             Spec: corev1.ServiceSpec{
                 Type:     corev1.ServiceTypeClusterIP,
-                Selector: map[string]string{"app": canaryName},
+                Selector: map[string]string{"app": targetName},
                 Ports: []corev1.ServicePort{
                     {
                         Name: "http",
@@ -79,7 +79,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
         if err != nil {
             return err
         }
-        c.logger.Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryService.GetName(), cd.Namespace)
     }

     canaryTestServiceName := fmt.Sprintf("%s-canary", cd.Spec.TargetRef.Name)
@@ -99,7 +99,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
             },
             Spec: corev1.ServiceSpec{
                 Type:     corev1.ServiceTypeClusterIP,
-                Selector: map[string]string{"app": canaryName},
+                Selector: map[string]string{"app": targetName},
                 Ports: []corev1.ServicePort{
                     {
                         Name: "http",
@@ -118,7 +118,7 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
         if err != nil {
             return err
         }
-        c.logger.Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", canaryTestService.GetName(), cd.Namespace)
     }

     primaryService, err := c.kubeClient.CoreV1().Services(cd.Namespace).Get(primaryName, metav1.GetOptions{})
@@ -157,22 +157,23 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
             return err
         }

-        c.logger.Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Service %s.%s created", primaryService.GetName(), cd.Namespace)
     }

     return nil
 }

 func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
-    canaryName := cd.Spec.TargetRef.Name
-    primaryName := fmt.Sprintf("%s-primary", canaryName)
-    hosts := append(cd.Spec.Service.Hosts, canaryName)
+    targetName := cd.Spec.TargetRef.Name
+    primaryName := fmt.Sprintf("%s-primary", targetName)
+    hosts := append(cd.Spec.Service.Hosts, targetName)
     gateways := append(cd.Spec.Service.Gateways, "mesh")
-    virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(canaryName, metav1.GetOptions{})
+    virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, metav1.GetOptions{})
     if errors.IsNotFound(err) {
+        c.logger.Debugf("VirtualService %s.%s not found", targetName, cd.Namespace)
         virtualService = &istiov1alpha3.VirtualService{
             ObjectMeta: metav1.ObjectMeta{
-                Name:      cd.Name,
+                Name:      targetName,
                 Namespace: cd.Namespace,
                 OwnerReferences: []metav1.OwnerReference{
                     *metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -199,7 +200,7 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
                 },
                 {
                     Destination: istiov1alpha3.Destination{
-                        Host: canaryName,
+                        Host: targetName,
                         Port: istiov1alpha3.PortSelector{
                             Number: uint32(cd.Spec.Service.Port),
                         },
@@ -212,11 +213,12 @@ func (c *CanaryRouter) createVirtualService(cd *flaggerv1.Canary) error {
             },
         }

+        c.logger.Debugf("Creating VirtualService %s.%s", virtualService.GetName(), cd.Namespace)
         _, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Create(virtualService)
         if err != nil {
-            return fmt.Errorf("VirtualService %s.%s create error %v", cd.Name, cd.Namespace, err)
+            return fmt.Errorf("VirtualService %s.%s create error %v", targetName, cd.Namespace, err)
         }
-        c.logger.Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
     }

     return nil
@@ -228,23 +230,24 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
     canary istiov1alpha3.DestinationWeight,
     err error,
 ) {
+    targetName := cd.Spec.TargetRef.Name
     vs := &istiov1alpha3.VirtualService{}
-    vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
+    vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
-            err = fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
+            err = fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
             return
         }
-        err = fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
+        err = fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
         return
     }

     for _, http := range vs.Spec.Http {
         for _, route := range http.Route {
-            if route.Destination.Host == fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) {
+            if route.Destination.Host == fmt.Sprintf("%s-primary", targetName) {
                 primary = route
             }
-            if route.Destination.Host == cd.Spec.TargetRef.Name {
+            if route.Destination.Host == targetName {
                 canary = route
             }
         }
@@ -252,7 +255,7 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (

     if primary.Weight == 0 && canary.Weight == 0 {
         err = fmt.Errorf("VirtualService %s.%s does not contain routes for %s and %s",
-            cd.Name, cd.Namespace, fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name), cd.Spec.TargetRef.Name)
+            targetName, cd.Namespace, fmt.Sprintf("%s-primary", targetName), targetName)
     }

     return
@@ -264,23 +267,26 @@ func (c *CanaryRouter) SetRoutes(
     primary istiov1alpha3.DestinationWeight,
     canary istiov1alpha3.DestinationWeight,
 ) error {
-    vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(cd.Name, v1.GetOptions{})
+    targetName := cd.Spec.TargetRef.Name
+    vs, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, v1.GetOptions{})
     if err != nil {
         if errors.IsNotFound(err) {
-            return fmt.Errorf("VirtualService %s.%s not found", cd.Name, cd.Namespace)
+            return fmt.Errorf("VirtualService %s.%s not found", targetName, cd.Namespace)
         }
-        return fmt.Errorf("VirtualService %s.%s query error %v", cd.Name, cd.Namespace, err)
+        return fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
     }
-    vs.Spec.Http = []istiov1alpha3.HTTPRoute{
+
+    vsCopy := vs.DeepCopy()
+    vsCopy.Spec.Http = []istiov1alpha3.HTTPRoute{
         {
             Route: []istiov1alpha3.DestinationWeight{primary, canary},
         },
     }

-    vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vs)
+    vs, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vsCopy)
     if err != nil {
-        return fmt.Errorf("VirtualService %s.%s update failed: %v", cd.Name, cd.Namespace, err)
+        return fmt.Errorf("VirtualService %s.%s update failed: %v", targetName, cd.Namespace, err)
     }
     return nil
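GetRoutes and SetRoutes are the primitives the scheduler uses to shift traffic: read the current primary/canary DestinationWeight pair off the VirtualService, adjust the weights, write them back. A sketch of one advancement step under assumed names (stepWeight stands in for cd.Spec.CanaryAnalysis.StepWeight):

primary, canary, err := router.GetRoutes(cd)
if err != nil {
    return err
}
// move stepWeight percent of the traffic from primary to canary
primary.Weight -= stepWeight
canary.Weight += stepWeight
if err := router.SetRoutes(cd, primary, canary); err != nil {
    return err
}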
@@ -2,39 +2,80 @@ package controller

 import (
     "fmt"
+    "strings"
     "time"

-    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
     "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

+// scheduleCanaries synchronises the canary map with the jobs map,
+// for new canaries new jobs are created and started
+// for the removed canaries the jobs are stopped and deleted
 func (c *Controller) scheduleCanaries() {
+    current := make(map[string]string)
     stats := make(map[string]int)

     c.canaries.Range(func(key interface{}, value interface{}) bool {
-        r := value.(*flaggerv1.Canary)
-        if r.Spec.TargetRef.Kind == "Deployment" {
-            go c.advanceCanary(r.Name, r.Namespace)
+        canary := value.(*flaggerv1.Canary)
+
+        // format: <name>.<namespace>
+        name := key.(string)
+        current[name] = fmt.Sprintf("%s.%s", canary.Spec.TargetRef.Name, canary.Namespace)
+
+        // schedule new jobs
+        if _, exists := c.jobs[name]; !exists {
+            job := CanaryJob{
+                Name:      canary.Name,
+                Namespace: canary.Namespace,
+                function:  c.advanceCanary,
+                done:      make(chan bool),
+                ticker:    time.NewTicker(canary.GetAnalysisInterval()),
+            }
+
+            c.jobs[name] = job
+            job.Start()
+        }

-        t, ok := stats[r.Namespace]
+        // compute canaries per namespace total
+        t, ok := stats[canary.Namespace]
         if !ok {
-            stats[r.Namespace] = 1
+            stats[canary.Namespace] = 1
         } else {
-            stats[r.Namespace] = t + 1
+            stats[canary.Namespace] = t + 1
         }
         return true
     })

+    // cleanup deleted jobs
+    for job := range c.jobs {
+        if _, exists := current[job]; !exists {
+            c.jobs[job].Stop()
+            delete(c.jobs, job)
+        }
+    }
+
+    // check if multiple canaries have the same target
+    for canaryName, targetName := range current {
+        for name, target := range current {
+            if name != canaryName && target == targetName {
+                c.logger.With("canary", canaryName).Errorf("Bad things will happen! Found more than one canary with the same target %s", targetName)
+            }
+        }
+    }
+
+    // set total canaries per namespace metric
     for k, v := range stats {
         c.recorder.SetTotal(k, v)
     }
 }

-func (c *Controller) advanceCanary(name string, namespace string) {
+func (c *Controller) advanceCanary(name string, namespace string, skipLivenessChecks bool) {
     begin := time.Now()
     // check if the canary exists
-    cd, err := c.flaggerClient.FlaggerV1alpha2().Canaries(namespace).Get(name, v1.GetOptions{})
+    cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(namespace).Get(name, v1.GetOptions{})
     if err != nil {
-        c.logger.Errorf("Canary %s.%s not found", name, namespace)
+        c.logger.With("canary", fmt.Sprintf("%s.%s", name, namespace)).Errorf("Canary %s.%s not found", name, namespace)
         return
     }

@@ -50,6 +91,13 @@ func (c *Controller) advanceCanary(name string, namespace string) {
         return
     }

+    if ok, err := c.deployer.ShouldAdvance(cd); !ok {
+        if err != nil {
+            c.recordEventWarningf(cd, "%v", err)
+        }
+        return
+    }
+
     // set max weight default value to 100%
     maxWeight := 100
     if cd.Spec.CanaryAnalysis.MaxWeight > 0 {
@@ -57,9 +105,11 @@ func (c *Controller) advanceCanary(name string, namespace string) {
     }

     // check primary deployment status
-    if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
-        c.recordEventWarningf(cd, "%v", err)
-        return
+    if !skipLivenessChecks {
+        if _, err := c.deployer.IsPrimaryReady(cd); err != nil {
+            c.recordEventWarningf(cd, "%v", err)
+            return
+        }
     }

     // check if virtual service exists
@@ -73,7 +123,33 @@ func (c *Controller) advanceCanary(name string, namespace string) {
     c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)

     // check if canary analysis should start (canary revision has changes) or continue
-    if ok := c.checkCanaryStatus(cd, c.deployer); !ok {
+    if ok := c.checkCanaryStatus(cd); !ok {
         return
     }

+    // check if canary revision changed during analysis
+    if restart := c.hasCanaryRevisionChanged(cd); restart {
+        c.recordEventInfof(cd, "New revision detected! Restarting analysis for %s.%s",
+            cd.Spec.TargetRef.Name, cd.Namespace)
+
+        // route all traffic back to primary
+        primaryRoute.Weight = 100
+        canaryRoute.Weight = 0
+        if err := c.router.SetRoutes(cd, primaryRoute, canaryRoute); err != nil {
+            c.recordEventWarningf(cd, "%v", err)
+            return
+        }
+
+        // reset status
+        status := flaggerv1.CanaryStatus{
+            Phase:        flaggerv1.CanaryProgressing,
+            CanaryWeight: 0,
+            FailedChecks: 0,
+        }
+        if err := c.deployer.SyncStatus(cd, status); err != nil {
+            c.recordEventWarningf(cd, "%v", err)
+            return
+        }
+        return
+    }

@@ -82,14 +158,17 @@ func (c *Controller) advanceCanary(name string, namespace string) {
     }()

     // check canary deployment status
-    retriable, err := c.deployer.IsCanaryReady(cd)
-    if err != nil && retriable {
-        c.recordEventWarningf(cd, "%v", err)
-        return
+    var retriable = true
+    if !skipLivenessChecks {
+        retriable, err = c.deployer.IsCanaryReady(cd)
+        if err != nil && retriable {
+            c.recordEventWarningf(cd, "%v", err)
+            return
+        }
     }

     // check if the number of failed checks reached the threshold
-    if cd.Status.State == flaggerv1.CanaryRunning &&
+    if cd.Status.Phase == flaggerv1.CanaryProgressing &&
         (!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {

         if cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
@@ -125,8 +204,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
         }

         // mark canary as failed
-        if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryFailed}); err != nil {
-            c.logger.Errorf("%v", err)
+        if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryFailed, CanaryWeight: 0}); err != nil {
+            c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
             return
         }

@@ -137,10 +216,10 @@ func (c *Controller) advanceCanary(name string, namespace string) {
     // check if the canary success rate is above the threshold
     // skip check if no traffic is routed to canary
     if canaryRoute.Weight == 0 {
-        c.recordEventInfof(cd, "Starting canary deployment for %s.%s", cd.Name, cd.Namespace)
+        c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
     } else {
         if ok := c.analyseCanary(cd); !ok {
-            if err := c.deployer.SetFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
+            if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
                 c.recordEventWarningf(cd, "%v", err)
                 return
             }
@@ -164,6 +243,12 @@ func (c *Controller) advanceCanary(name string, namespace string) {
             return
         }

+        // update weight status
+        if err := c.deployer.SetStatusWeight(cd, canaryRoute.Weight); err != nil {
+            c.recordEventWarningf(cd, "%v", err)
+            return
+        }
+
         c.recorder.SetWeight(cd, primaryRoute.Weight, canaryRoute.Weight)
         c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryRoute.Weight)

@@ -195,8 +280,8 @@ func (c *Controller) advanceCanary(name string, namespace string) {
             return
         }

-        // update status
-        if err := c.deployer.SetState(cd, flaggerv1.CanaryFinished); err != nil {
+        // update status phase
+        if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
             c.recordEventWarningf(cd, "%v", err)
             return
         }
@@ -206,15 +291,15 @@ func (c *Controller) advanceCanary(name string, namespace string) {
     }
 }

-func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDeployer) bool {
+func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary) bool {
     c.recorder.SetStatus(cd)
-    if cd.Status.State == "running" {
+    if cd.Status.Phase == flaggerv1.CanaryProgressing {
         return true
     }

-    if cd.Status.State == "" {
-        if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryInitialized}); err != nil {
-            c.logger.Errorf("%v", err)
+    if cd.Status.Phase == "" {
+        if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryInitialized}); err != nil {
+            c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
             return false
         }
         c.recorder.SetStatus(cd)
@@ -224,16 +309,16 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
         return false
     }

-    if diff, err := deployer.IsNewSpec(cd); diff {
+    if diff, err := c.deployer.IsNewSpec(cd); diff {
         c.recordEventInfof(cd, "New revision detected! Scaling up %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
         c.sendNotification(cd, "New revision detected, starting canary analysis.",
             true, false)
-        if err = deployer.Scale(cd, 1); err != nil {
+        if err = c.deployer.Scale(cd, 1); err != nil {
             c.recordEventErrorf(cd, "%v", err)
             return false
         }
-        if err := deployer.SyncStatus(cd, flaggerv1.CanaryStatus{State: flaggerv1.CanaryRunning}); err != nil {
-            c.logger.Errorf("%v", err)
+        if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryProgressing}); err != nil {
+            c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
             return false
         }
         c.recorder.SetStatus(cd)
@@ -242,13 +327,37 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, deployer CanaryDepl
     return false
 }

+func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
+    if cd.Status.Phase == flaggerv1.CanaryProgressing {
+        if diff, _ := c.deployer.IsNewSpec(cd); diff {
+            return true
+        }
+    }
+    return false
+}
+
 func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
+    // run external checks
+    for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
+        err := CallWebhook(r.Name, r.Namespace, webhook)
+        if err != nil {
+            c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
+                r.Name, r.Namespace, webhook.Name, err)
+            return false
+        }
+    }
+
+    // run metrics checks
     for _, metric := range r.Spec.CanaryAnalysis.Metrics {
         if metric.Name == "istio_requests_total" {
             val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
             if err != nil {
-                c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+                if strings.Contains(err.Error(), "no values found") {
+                    c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
+                        metric.Name, r.Spec.TargetRef.Name, r.Namespace)
+                } else {
+                    c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+                }
                 return false
             }
             if float64(metric.Threshold) > val {
@@ -273,15 +382,5 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
         }
     }

-    // run external checks
-    for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
-        err := CallWebhook(r.Name, r.Namespace, webhook)
-        if err != nil {
-            c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
-                r.Name, r.Namespace, webhook.Name, err)
-            return false
-        }
-    }
-
     return true
 }
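scheduleCanaries now also guards against two Canary resources pointing at the same Deployment, which would make their jobs fight over the routes. The check above in a runnable, self-contained form (names are illustrative):

package main

import "fmt"

func main() {
    // keys are "<canary>.<namespace>", values are "<target>.<namespace>"
    current := map[string]string{
        "podinfo-a.default": "podinfo.default",
        "podinfo-b.default": "podinfo.default", // same target: warning fires
    }
    for canaryName, targetName := range current {
        for name, target := range current {
            if name != canaryName && target == targetName {
                fmt.Printf("canary %s: found more than one canary with the same target %s\n", canaryName, targetName)
            }
        }
    }
}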
@@ -1,12 +1,16 @@
 package controller

 import (
+    "go.uber.org/zap"
+    "k8s.io/client-go/kubernetes"
     "sync"
     "testing"
     "time"

+    istioclientset "github.com/knative/pkg/client/clientset/versioned"
     fakeIstio "github.com/knative/pkg/client/clientset/versioned/fake"
-    "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
+    clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
     fakeFlagger "github.com/stefanprodan/flagger/pkg/client/clientset/versioned/fake"
     informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
     "github.com/stefanprodan/flagger/pkg/logging"
@@ -21,6 +25,39 @@ var (
     noResyncPeriodFunc = func() time.Duration { return 0 }
 )

+func newTestController(
+    kubeClient kubernetes.Interface,
+    istioClient istioclientset.Interface,
+    flaggerClient clientset.Interface,
+    logger *zap.SugaredLogger,
+    deployer CanaryDeployer,
+    router CanaryRouter,
+    observer CanaryObserver,
+) *Controller {
+    flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
+    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
+
+    ctrl := &Controller{
+        kubeClient:    kubeClient,
+        istioClient:   istioClient,
+        flaggerClient: flaggerClient,
+        flaggerLister: flaggerInformer.Lister(),
+        flaggerSynced: flaggerInformer.Informer().HasSynced,
+        workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
+        eventRecorder: &record.FakeRecorder{},
+        logger:        logger,
+        canaries:      new(sync.Map),
+        flaggerWindow: time.Second,
+        deployer:      deployer,
+        router:        router,
+        observer:      observer,
+        recorder:      NewCanaryRecorder(false),
+    }
+    ctrl.flaggerSynced = alwaysReady
+
+    return ctrl
+}
+
 func TestScheduler_Init(t *testing.T) {
     canary := newTestCanary()
     dep := newTestDeployment()
@@ -45,29 +82,9 @@ func TestScheduler_Init(t *testing.T) {
     observer := CanaryObserver{
         metricsServer: "fake",
     }

-    flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
-    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
-
-    ctrl := &Controller{
-        kubeClient:    kubeClient,
-        istioClient:   istioClient,
-        flaggerClient: flaggerClient,
-        flaggerLister: flaggerInformer.Lister(),
-        flaggerSynced: flaggerInformer.Informer().HasSynced,
-        workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
-        eventRecorder: &record.FakeRecorder{},
-        logger:        logger,
-        canaries:      new(sync.Map),
-        flaggerWindow: time.Second,
-        deployer:      deployer,
-        router:        router,
-        observer:      observer,
-        recorder:      NewCanaryRecorder(false),
-    }
-    ctrl.flaggerSynced = alwaysReady
-
-    ctrl.advanceCanary("podinfo", "default")
+    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
+
+    ctrl.advanceCanary("podinfo", "default", false)

     _, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
     if err != nil {
@@ -99,30 +116,10 @@ func TestScheduler_NewRevision(t *testing.T) {
     observer := CanaryObserver{
         metricsServer: "fake",
     }

-    flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
-    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
-
-    ctrl := &Controller{
-        kubeClient:    kubeClient,
-        istioClient:   istioClient,
-        flaggerClient: flaggerClient,
-        flaggerLister: flaggerInformer.Lister(),
-        flaggerSynced: flaggerInformer.Informer().HasSynced,
-        workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
-        eventRecorder: &record.FakeRecorder{},
-        logger:        logger,
-        canaries:      new(sync.Map),
-        flaggerWindow: time.Second,
-        deployer:      deployer,
-        router:        router,
-        observer:      observer,
-        recorder:      NewCanaryRecorder(false),
-    }
-    ctrl.flaggerSynced = alwaysReady
+    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)

     // init
-    ctrl.advanceCanary("podinfo", "default")
+    ctrl.advanceCanary("podinfo", "default", false)

     // update
     dep2 := newTestDeploymentUpdated()
@@ -132,7 +129,7 @@ func TestScheduler_NewRevision(t *testing.T) {
     }

     // detect changes
-    ctrl.advanceCanary("podinfo", "default")
+    ctrl.advanceCanary("podinfo", "default", false)

     c, err := kubeClient.AppsV1().Deployments("default").Get("podinfo", metav1.GetOptions{})
     if err != nil {
@@ -168,46 +165,195 @@ func TestScheduler_Rollback(t *testing.T) {
     observer := CanaryObserver{
         metricsServer: "fake",
     }

-    flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
-    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha2().Canaries()
-
-    ctrl := &Controller{
-        kubeClient:    kubeClient,
-        istioClient:   istioClient,
-        flaggerClient: flaggerClient,
-        flaggerLister: flaggerInformer.Lister(),
-        flaggerSynced: flaggerInformer.Informer().HasSynced,
-        workqueue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
-        eventRecorder: &record.FakeRecorder{},
-        logger:        logger,
-        canaries:      new(sync.Map),
-        flaggerWindow: time.Second,
-        deployer:      deployer,
-        router:        router,
-        observer:      observer,
-        recorder:      NewCanaryRecorder(false),
-    }
-    ctrl.flaggerSynced = alwaysReady
+    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)

     // init
-    ctrl.advanceCanary("podinfo", "default")
+    ctrl.advanceCanary("podinfo", "default", true)

     // update failed checks to max
-    err := deployer.SyncStatus(canary, v1alpha2.CanaryStatus{State: v1alpha2.CanaryRunning, FailedChecks: 11})
+    err := deployer.SyncStatus(canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
     if err != nil {
         t.Fatal(err.Error())
     }

     // detect changes
-    ctrl.advanceCanary("podinfo", "default")
+    ctrl.advanceCanary("podinfo", "default", true)

-    c, err := flaggerClient.FlaggerV1alpha2().Canaries("default").Get("podinfo", metav1.GetOptions{})
+    c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
     if err != nil {
         t.Fatal(err.Error())
     }

-    if c.Status.State != v1alpha2.CanaryFailed {
-        t.Errorf("Got canary state %v wanted %v", c.Status.State, v1alpha2.CanaryFailed)
+    if c.Status.Phase != v1alpha3.CanaryFailed {
+        t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryFailed)
     }
 }

+func TestScheduler_NewRevisionReset(t *testing.T) {
+    canary := newTestCanary()
+    dep := newTestDeployment()
+    hpa := newTestHPA()
+
+    flaggerClient := fakeFlagger.NewSimpleClientset(canary)
+    kubeClient := fake.NewSimpleClientset(dep, hpa)
+    istioClient := fakeIstio.NewSimpleClientset()
+
+    logger, _ := logging.NewLogger("debug")
+    deployer := CanaryDeployer{
+        flaggerClient: flaggerClient,
+        kubeClient:    kubeClient,
+        logger:        logger,
+    }
+    router := CanaryRouter{
+        flaggerClient: flaggerClient,
+        kubeClient:    kubeClient,
+        istioClient:   istioClient,
+        logger:        logger,
+    }
+    observer := CanaryObserver{
+        metricsServer: "fake",
+    }
+    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
+
+    // init
+    ctrl.advanceCanary("podinfo", "default", false)
+
+    // first update
+    dep2 := newTestDeploymentUpdated()
+    _, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    // detect changes
+    ctrl.advanceCanary("podinfo", "default", true)
+    // advance
+    ctrl.advanceCanary("podinfo", "default", true)
+
+    primaryRoute, canaryRoute, err := router.GetRoutes(canary)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    if primaryRoute.Weight != 90 {
+        t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 90)
+    }
+
+    if canaryRoute.Weight != 10 {
+        t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 10)
+    }
+
+    // second update
+    dep2.Spec.Template.Spec.ServiceAccountName = "test"
+    _, err = kubeClient.AppsV1().Deployments("default").Update(dep2)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    // detect changes
+    ctrl.advanceCanary("podinfo", "default", true)
+
+    primaryRoute, canaryRoute, err = router.GetRoutes(canary)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    if primaryRoute.Weight != 100 {
+        t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
+    }
+
+    if canaryRoute.Weight != 0 {
+        t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
+    }
+}
+
+func TestScheduler_Promotion(t *testing.T) {
+    canary := newTestCanary()
+    dep := newTestDeployment()
+    hpa := newTestHPA()
+
+    flaggerClient := fakeFlagger.NewSimpleClientset(canary)
+    kubeClient := fake.NewSimpleClientset(dep, hpa)
+    istioClient := fakeIstio.NewSimpleClientset()
+
+    logger, _ := logging.NewLogger("debug")
+    deployer := CanaryDeployer{
+        flaggerClient: flaggerClient,
+        kubeClient:    kubeClient,
+        logger:        logger,
+    }
+    router := CanaryRouter{
+        flaggerClient: flaggerClient,
+        kubeClient:    kubeClient,
+        istioClient:   istioClient,
+        logger:        logger,
+    }
+    observer := CanaryObserver{
+        metricsServer: "fake",
+    }
+    ctrl := newTestController(kubeClient, istioClient, flaggerClient, logger, deployer, router, observer)
+
+    // init
+    ctrl.advanceCanary("podinfo", "default", false)
+
+    // update
+    dep2 := newTestDeploymentUpdated()
+    _, err := kubeClient.AppsV1().Deployments("default").Update(dep2)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    // detect changes
+    ctrl.advanceCanary("podinfo", "default", true)
+
+    primaryRoute, canaryRoute, err := router.GetRoutes(canary)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    primaryRoute.Weight = 60
+    canaryRoute.Weight = 40
+    err = ctrl.router.SetRoutes(canary, primaryRoute, canaryRoute)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    // advance
+    ctrl.advanceCanary("podinfo", "default", true)
+
+    // promote
+    ctrl.advanceCanary("podinfo", "default", true)
+
+    primaryRoute, canaryRoute, err = router.GetRoutes(canary)
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    if primaryRoute.Weight != 100 {
+        t.Errorf("Got primary route %v wanted %v", primaryRoute.Weight, 100)
+    }
+
+    if canaryRoute.Weight != 0 {
+        t.Errorf("Got canary route %v wanted %v", canaryRoute.Weight, 0)
+    }
+
+    primaryDep, err := kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    primaryImage := primaryDep.Spec.Template.Spec.Containers[0].Image
+    canaryImage := dep2.Spec.Template.Spec.Containers[0].Image
+    if primaryImage != canaryImage {
+        t.Errorf("Got primary image %v wanted %v", primaryImage, canaryImage)
+    }
+
+    c, err := flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
+    if err != nil {
+        t.Fatal(err.Error())
+    }
+
+    if c.Status.Phase != v1alpha3.CanarySucceeded {
+        t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
+    }
+}
@@ -6,7 +6,7 @@ package controller
     "encoding/json"
     "errors"
     "fmt"
-    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
     "io/ioutil"
     "net/http"
     "net/url"
@@ -19,7 +19,10 @@ func CallWebhook(name string, namepace string, w flaggerv1.CanaryWebhook) error
     payload := flaggerv1.CanaryWebhookPayload{
         Name:      name,
         Namespace: namepace,
-        Metadata:  w.Metadata,
     }

+    if w.Metadata != nil {
+        payload.Metadata = *w.Metadata
+    }
+
     payloadBin, err := json.Marshal(payload)
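With Metadata now a pointer on the webhook spec, the payload only carries metadata when the Canary defines it. A sketch of the resulting JSON body, under the same assumption the loadtester server below makes (a "cmd" entry in the metadata map):

payload := flaggerv1.CanaryWebhookPayload{
    Name:      "podinfo",
    Namespace: "default",
}
if w.Metadata != nil { // only set when the Canary spec defines metadata
    payload.Metadata = *w.Metadata
}
body, _ := json.Marshal(payload)
// e.g. {"name":"podinfo","namespace":"default","metadata":{"cmd":"..."}}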
@@ -1,7 +1,7 @@
 package controller

 import (
-    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha2"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
     "net/http"
     "net/http/httptest"
     "testing"
pkg/loadtester/runner.go (new file, 107 lines)
@@ -0,0 +1,107 @@
+package loadtester
+
+import (
+    "context"
+    "encoding/hex"
+    "fmt"
+    "go.uber.org/zap"
+    "hash/fnv"
+    "os/exec"
+    "sync"
+    "sync/atomic"
+    "time"
+)
+
+type TaskRunner struct {
+    logger       *zap.SugaredLogger
+    timeout      time.Duration
+    todoTasks    *sync.Map
+    runningTasks *sync.Map
+    totalExecs   uint64
+    logCmdOutput bool
+}
+
+type Task struct {
+    Canary  string
+    Command string
+}
+
+func (t Task) Hash() string {
+    fnvHash := fnv.New32()
+    fnvBytes := fnvHash.Sum([]byte(t.Canary + t.Command))
+    return hex.EncodeToString(fnvBytes[:])
+}
+
+func NewTaskRunner(logger *zap.SugaredLogger, timeout time.Duration, logCmdOutput bool) *TaskRunner {
+    return &TaskRunner{
+        logger:       logger,
+        todoTasks:    new(sync.Map),
+        runningTasks: new(sync.Map),
+        timeout:      timeout,
+        logCmdOutput: logCmdOutput,
+    }
+}
+
+func (tr *TaskRunner) Add(task Task) {
+    tr.todoTasks.Store(task.Hash(), task)
+}
+
+func (tr *TaskRunner) GetTotalExecs() uint64 {
+    return atomic.LoadUint64(&tr.totalExecs)
+}
+
+func (tr *TaskRunner) runAll() {
+    tr.todoTasks.Range(func(key interface{}, value interface{}) bool {
+        task := value.(Task)
+        go func(t Task) {
+            // remove task from the to do list
+            tr.todoTasks.Delete(t.Hash())
+
+            // check if task is already running, if not run the task's command
+            if _, exists := tr.runningTasks.Load(t.Hash()); !exists {
+                // save the task in the running list
+                tr.runningTasks.Store(t.Hash(), t)
+
+                // create timeout context
+                ctx, cancel := context.WithTimeout(context.Background(), tr.timeout)
+                defer cancel()
+
+                // increment the total exec counter
+                atomic.AddUint64(&tr.totalExecs, 1)
+
+                tr.logger.With("canary", t.Canary).Infof("command starting %s", t.Command)
+                cmd := exec.CommandContext(ctx, "sh", "-c", t.Command)
+
+                // execute task
+                out, err := cmd.CombinedOutput()
+                if err != nil {
+                    tr.logger.With("canary", t.Canary).Errorf("command failed %s %v %s", t.Command, err, out)
+                } else {
+                    if tr.logCmdOutput {
+                        fmt.Printf("%s\n", out)
+                    }
+                    tr.logger.With("canary", t.Canary).Infof("command finished %s", t.Command)
+                }
+
+                // remove task from the running list
+                tr.runningTasks.Delete(t.Hash())
+            } else {
+                tr.logger.With("canary", t.Canary).Infof("command skipped %s is already running", t.Command)
+            }
+        }(task)
+        return true
+    })
+}
+
+func (tr *TaskRunner) Start(interval time.Duration, stopCh <-chan struct{}) {
+    tickChan := time.NewTicker(interval).C
+    for {
+        select {
+        case <-tickChan:
+            tr.runAll()
+        case <-stopCh:
+            tr.logger.Info("shutting down the task runner")
+            return
+        }
+    }
+}
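The two sync.Maps give the runner at-most-once semantics per distinct (canary, command) pair: Add is idempotent while a task sits in the queue, and runAll skips any hash still in the running set. A hypothetical wiring (the hey invocation is illustrative; see the test below for the dedup timing):

stopCh := make(chan struct{})
logger, _ := logging.NewLogger("info")
tr := NewTaskRunner(logger, time.Hour, false)
go tr.Start(100*time.Millisecond, stopCh)

// re-adding the same command while it runs is a no-op
tr.Add(Task{
    Canary:  "podinfo.default",
    Command: "hey -z 1m -q 10 -c 2 http://podinfo-canary:9898/",
})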
pkg/loadtester/runner_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
+package loadtester
+
+import (
+    "github.com/stefanprodan/flagger/pkg/logging"
+    "testing"
+    "time"
+)
+
+func TestTaskRunner_Start(t *testing.T) {
+    stop := make(chan struct{})
+    logger, _ := logging.NewLogger("debug")
+    tr := NewTaskRunner(logger, time.Hour, false)
+
+    go tr.Start(10*time.Millisecond, stop)
+
+    task1 := Task{
+        Canary:  "podinfo.default",
+        Command: "sleep 0.6",
+    }
+    task2 := Task{
+        Canary:  "podinfo.default",
+        Command: "sleep 0.7",
+    }
+
+    tr.Add(task1)
+    tr.Add(task2)
+
+    time.Sleep(100 * time.Millisecond)
+
+    tr.Add(task1)
+    tr.Add(task2)
+
+    time.Sleep(100 * time.Millisecond)
+
+    tr.Add(task1)
+    tr.Add(task2)
+
+    if tr.GetTotalExecs() != 2 {
+        t.Errorf("Got total executed commands %v wanted %v", tr.GetTotalExecs(), 2)
+    }
+
+    time.Sleep(time.Second)
+
+    tr.Add(task1)
+    tr.Add(task2)
+
+    time.Sleep(time.Second)
+
+    if tr.GetTotalExecs() != 4 {
+        t.Errorf("Got total executed commands %v wanted %v", tr.GetTotalExecs(), 4)
+    }
+}
pkg/loadtester/server.go (new file, 85 lines)
@@ -0,0 +1,85 @@
+package loadtester
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "time"
+
+    "github.com/prometheus/client_golang/prometheus/promhttp"
+    flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
+    "go.uber.org/zap"
+)
+
+// ListenAndServe starts a web server and waits for SIGTERM
+func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogger, taskRunner *TaskRunner, stopCh <-chan struct{}) {
+    mux := http.DefaultServeMux
+    mux.Handle("/metrics", promhttp.Handler())
+    mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+        w.WriteHeader(http.StatusOK)
+        w.Write([]byte("OK"))
+    })
+    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+        body, err := ioutil.ReadAll(r.Body)
+        if err != nil {
+            logger.Error("reading the request body failed", zap.Error(err))
+            w.WriteHeader(http.StatusBadRequest)
+            return
+        }
+        defer r.Body.Close()
+
+        payload := &flaggerv1.CanaryWebhookPayload{}
+        err = json.Unmarshal(body, payload)
+        if err != nil {
+            logger.Error("decoding the request body failed", zap.Error(err))
+            w.WriteHeader(http.StatusBadRequest)
+            return
+        }
+
+        if len(payload.Metadata) > 0 {
+            if cmd, ok := payload.Metadata["cmd"]; ok {
+                taskRunner.Add(Task{
+                    Canary:  fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
+                    Command: cmd,
+                })
+            } else {
+                w.WriteHeader(http.StatusBadRequest)
+                w.Write([]byte("cmd not found in metadata"))
+                return
+            }
+        } else {
+            w.WriteHeader(http.StatusBadRequest)
+            w.Write([]byte("metadata not found in payload"))
+            return
+        }
+
+        w.WriteHeader(http.StatusAccepted)
+    })
+    srv := &http.Server{
+        Addr:         ":" + port,
+        Handler:      mux,
+        ReadTimeout:  5 * time.Second,
+        WriteTimeout: 1 * time.Minute,
+        IdleTimeout:  15 * time.Second,
+    }
+
+    // run server in background
+    go func() {
+        if err := srv.ListenAndServe(); err != http.ErrServerClosed {
+            logger.Fatalf("HTTP server crashed %v", err)
+        }
+    }()
+
+    // wait for SIGTERM or SIGINT
+    <-stopCh
+    ctx, cancel := context.WithTimeout(context.Background(), timeout)
+    defer cancel()
+
+    if err := srv.Shutdown(ctx); err != nil {
+        logger.Errorf("HTTP server graceful shutdown failed %v", err)
+    } else {
+        logger.Info("HTTP server stopped")
+    }
+}
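End to end, CallWebhook (earlier in this diff) posts the canary payload to this handler, which queues the metadata cmd on the task runner and answers 202 Accepted. A hypothetical client-side call against an assumed service address and port:

payload := []byte(`{"name":"podinfo","namespace":"default","metadata":{"cmd":"hey -z 1m -q 10 -c 2 http://podinfo-canary:9898/"}}`)
resp, err := http.Post("http://flagger-loadtester:8080/", "application/json", bytes.NewReader(payload))
if err == nil && resp.StatusCode == http.StatusAccepted {
    // the command was queued by the task runner
}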
@@ -1,4 +1,4 @@
 package version

-var VERSION = "0.2.0"
+var VERSION = "0.4.1"
 var REVISION = "unknown"