mirror of https://github.com/fluxcd/flagger.git
synced 2026-02-15 10:30:01 +00:00
Compare commits
27 Commits
| SHA1 |
|---|
| 65e9a402cf |
| f7513b33a6 |
| 0b3fa517d3 |
| 507075920c |
| a212f032a6 |
| eb8755249f |
| 73bb2a9fa2 |
| 5d3ffa8c90 |
| 87f143f5fd |
| f56b6dd6a7 |
| 5e40340f9c |
| 2456737df7 |
| 1191d708de |
| 4d26971fc7 |
| 0421b32834 |
| 360dd63e49 |
| f1670dbe6a |
| e7ad5c0381 |
| 2cfe2a105a |
| bc83cee503 |
| 5091d3573c |
| ffe5dd91c5 |
| d76b560967 |
| f062ef3a57 |
| 5fc1baf4df |
| 777b77b69e |
| 5d221e781a |
@@ -132,6 +132,9 @@ jobs:
       - run: test/e2e-kind.sh
       - run: test/e2e-nginx.sh
       - run: test/e2e-nginx-tests.sh
+      - run: test/e2e-nginx-cleanup.sh
+      - run: test/e2e-nginx-custom-annotations.sh
+      - run: test/e2e-nginx-tests.sh

   e2e-linkerd-testing:
     machine: true
33  CHANGELOG.md

@@ -2,6 +2,39 @@

All notable changes to this project are documented in this file.

+## 0.18.4 (2019-09-08)
+
+Adds support for NGINX custom annotations and Helm v3 acceptance testing
+
+#### Features
+
+- Add annotations prefix for NGINX ingresses [#293](https://github.com/weaveworks/flagger/pull/293)
+- Add wide columns in CRD [#289](https://github.com/weaveworks/flagger/pull/289)
+- loadtester: implement Helm v3 test command [#296](https://github.com/weaveworks/flagger/pull/296)
+- loadtester: add gRPC health check to load tester image [#295](https://github.com/weaveworks/flagger/pull/295)
+
+#### Fixes
+
+- loadtester: fix tests error logging [#286](https://github.com/weaveworks/flagger/pull/286)
+
+## 0.18.3 (2019-08-22)
+
+Adds support for tillerless helm tests and protobuf health checking
+
+#### Features
+
+- loadtester: add support for tillerless helm [#280](https://github.com/weaveworks/flagger/pull/280)
+- loadtester: add support for protobuf health checking [#280](https://github.com/weaveworks/flagger/pull/280)
+
+#### Improvements
+
+- Set HTTP listeners for AppMesh virtual routers [#272](https://github.com/weaveworks/flagger/pull/272)
+
+#### Fixes
+
+- Add missing fields to CRD validation spec [#271](https://github.com/weaveworks/flagger/pull/271)
+- Fix App Mesh backends validation in CRD [#281](https://github.com/weaveworks/flagger/pull/281)
+
## 0.18.2 (2019-08-05)

Fixes multi-port support for Istio
@@ -9,13 +9,24 @@ WORKDIR /home/app

RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
    chmod +x hey && mv hey /usr/local/bin/hey

-RUN curl -sSL "https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz" | tar xvz && \
+RUN curl -sSL "https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz" | tar xvz && \
    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
    chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
    rm -rf linux-amd64
+
+RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-beta.3-linux-amd64.tar.gz" | tar xvz && \
+    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
+    rm -rf linux-amd64
+
+RUN GRPC_HEALTH_PROBE_VERSION=v0.3.0 && \
+    wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /usr/local/bin/grpc_health_probe

RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
    mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web
+
+ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto
+
+RUN ls /tmp

COPY ./bin/loadtester .

@@ -24,4 +35,7 @@ RUN chown -R app:app ./

USER app

+RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.8.3.tar.gz" | tar xvz && \
+    helm init --client-only && helm plugin install helm-tiller-0.8.3 && helm plugin list
+
ENTRYPOINT ["./loadtester"]
@@ -25,7 +25,7 @@ spec:
    spec:
      containers:
      - name: podinfod
-       image: quay.io/stefanprodan/podinfo:1.7.0
+       image: quay.io/stefanprodan/podinfo:2.0.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
@@ -33,6 +33,22 @@ spec:
    - name: Weight
      type: string
      JSONPath: .status.canaryWeight
+   - name: FailedChecks
+     type: string
+     JSONPath: .status.failedChecks
+     priority: 1
+   - name: Interval
+     type: string
+     JSONPath: .spec.canaryAnalysis.interval
+     priority: 1
+   - name: StepWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.stepWeight
+     priority: 1
+   - name: MaxWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.maxWeight
+     priority: 1
    - name: LastTransitionTime
      type: string
      JSONPath: .status.lastTransitionTime
@@ -54,7 +70,7 @@ spec:
              targetRef:
                description: Deployment selector
                type: object
-               required: ['apiVersion', 'kind', 'name']
+               required: ["apiVersion", "kind", "name"]
                properties:
                  apiVersion:
                    type: string
@@ -67,7 +83,7 @@ spec:
                anyOf:
                  - type: string
                  - type: object
-                   required: ['apiVersion', 'kind', 'name']
+                   required: ["apiVersion", "kind", "name"]
                    properties:
                      apiVersion:
                        type: string
@@ -80,7 +96,7 @@ spec:
                anyOf:
                  - type: string
                  - type: object
-                   required: ['apiVersion', 'kind', 'name']
+                   required: ["apiVersion", "kind", "name"]
                    properties:
                      apiVersion:
                        type: string
@@ -90,7 +106,7 @@ spec:
                    type: string
              service:
                type: object
-               required: ['port']
+               required: ["port"]
                properties:
                  port:
                    description: Container port number
@@ -108,7 +124,7 @@ spec:
                description: AppMesh backend array
                anyOf:
                  - type: string
                  - type: object
                  - type: array
              timeout:
                description: Istio HTTP or gRPC request timeout
                type: string
@@ -178,7 +194,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['name', 'threshold']
+                   required: ["name", "threshold"]
                    properties:
                      name:
                        description: Name of the Prometheus metric
@@ -199,7 +215,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['name', 'url', 'timeout']
+                   required: ["name", "url", "timeout"]
                    properties:
                      name:
                        description: Name of the webhook
@@ -262,7 +278,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['type', 'status', 'reason']
+                   required: ["type", "status", "reason"]
                    properties:
                      lastTransitionTime:
                        description: LastTransitionTime of this condition
@@ -22,7 +22,7 @@ spec:
      serviceAccountName: flagger
      containers:
      - name: flagger
-       image: weaveworks/flagger:0.18.2
+       image: weaveworks/flagger:0.18.4
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -19,7 +19,7 @@ spec:
      serviceAccountName: tiller
      containers:
      - name: helmtester
-       image: weaveworks/flagger-loadtester:0.4.0
+       image: weaveworks/flagger-loadtester:0.8.0
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -17,7 +17,7 @@ spec:
    spec:
      containers:
      - name: loadtester
-       image: weaveworks/flagger-loadtester:0.6.1
+       image: weaveworks/flagger-loadtester:0.8.0
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
-version: 0.18.2
-appVersion: 0.18.2
+version: 0.18.4
+appVersion: 0.18.4
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
@@ -74,6 +74,7 @@ Parameter | Description | Default
`msteams.url` | Microsoft Teams incoming webhook | None
`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
`leaderElection.replicaCount` | number of replicas | `1`
+`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`crd.create` | if `true`, create Flagger's CRDs | `true`
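The new `ingressAnnotationsPrefix` chart value flows through the deployment template (further down in this compare) to the controller's `-ingress-annotations-prefix` flag. A minimal sketch of setting it at install time, mirroring the e2e script later in this compare (release name and namespace are illustrative):

```bash
helm upgrade -i flagger ./charts/flagger \
  --namespace ingress-nginx \
  --set meshProvider=nginx \
  --set ingressAnnotationsPrefix="custom.ingress.kubernetes.io"
```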
@@ -34,6 +34,22 @@ spec:
    - name: Weight
      type: string
      JSONPath: .status.canaryWeight
+   - name: FailedChecks
+     type: string
+     JSONPath: .status.failedChecks
+     priority: 1
+   - name: Interval
+     type: string
+     JSONPath: .spec.canaryAnalysis.interval
+     priority: 1
+   - name: StepWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.stepWeight
+     priority: 1
+   - name: MaxWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.maxWeight
+     priority: 1
    - name: LastTransitionTime
      type: string
      JSONPath: .status.lastTransitionTime
@@ -109,7 +125,7 @@ spec:
                description: AppMesh backend array
                anyOf:
                  - type: string
                  - type: object
                  - type: array
              timeout:
                description: Istio HTTP or gRPC request timeout
                type: string
@@ -72,6 +72,9 @@ spec:
          - -enable-leader-election=true
          - -leader-election-namespace={{ .Release.Namespace }}
          {{- end }}
+         {{- if .Values.ingressAnnotationsPrefix }}
+         - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
+         {{- end }}
          livenessProbe:
            exec:
              command:
@@ -2,7 +2,7 @@

image:
  repository: weaveworks/flagger
- tag: 0.18.2
+ tag: 0.18.4
  pullPolicy: IfNotPresent
  pullSecret:
@@ -1,7 +1,7 @@
apiVersion: v1
name: loadtester
-version: 0.6.0
-appVersion: 0.6.1
+version: 0.8.0
+appVersion: 0.8.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
@@ -21,6 +21,8 @@ spec:
    spec:
      {{- if .Values.serviceAccountName }}
      serviceAccountName: {{ .Values.serviceAccountName }}
+     {{- else if .Values.rbac.create }}
+     serviceAccountName: {{ include "loadtester.fullname" . }}
      {{- end }}
      containers:
      - name: {{ .Chart.Name }}
54  charts/loadtester/templates/rbac.yaml  Normal file

@@ -0,0 +1,54 @@
---
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq .Values.rbac.scope "cluster" }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
{{ toYaml .Values.rbac.rules | indent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq .Values.rbac.scope "cluster" }}
kind: ClusterRoleBinding
{{- else }}
kind: RoleBinding
{{- end }}
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  {{- if eq .Values.rbac.scope "cluster" }}
  kind: ClusterRole
  {{- else }}
  kind: Role
  {{- end }}
  name: {{ template "loadtester.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "loadtester.fullname" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -2,7 +2,7 @@ replicaCount: 1

image:
  repository: weaveworks/flagger-loadtester
- tag: 0.6.1
+ tag: 0.8.0
  pullPolicy: IfNotPresent

logLevel: info
@@ -27,6 +27,20 @@ tolerations: []

affinity: {}

+rbac:
+  # rbac.create: `true` if rbac resources should be created
+  create: false
+  # rbac.scope: `cluster` to create cluster-scope rbac resources (ClusterRole/ClusterRoleBinding)
+  # otherwise, namespace-scope rbac resources will be created (Role/RoleBinding)
+  scope:
+  # rbac.rules: array of rules to apply to the role. example:
+  # rules:
+  #   - apiGroups: [""]
+  #     resources: ["pods"]
+  #     verbs: ["list", "get"]
+  rules: []
+
+# name of an existing service account to use - if not creating rbac resources
+serviceAccountName: ""

# App Mesh virtual node settings
@@ -33,25 +33,26 @@ import (
)

var (
    masterURL                string
    kubeconfig               string
    metricsServer            string
    controlLoopInterval      time.Duration
    logLevel                 string
    port                     string
    msteamsURL               string
    slackURL                 string
    slackUser                string
    slackChannel             string
    threadiness              int
    zapReplaceGlobals        bool
    zapEncoding              string
    namespace                string
    meshProvider             string
    selectorLabels           string
+   ingressAnnotationsPrefix string
    enableLeaderElection     bool
    leaderElectionNamespace  string
    ver                      bool
)

func init() {
@@ -71,6 +72,7 @@ func init() {
    flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
    flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, supergloo, nginx or smi.")
    flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
+   flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for ingresses.")
    flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
    flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
    flag.BoolVar(&ver, "version", false, "Print version")
@@ -175,7 +177,7 @@ func main() {
    // start HTTP server
    go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

-   routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)
+   routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)

    c := controller.NewController(
        kubeClient,
@@ -10,7 +10,7 @@ import (
    "time"
)

-var VERSION = "0.6.1"
+var VERSION = "0.8.0"
var (
    logLevel string
    port     string
@@ -798,6 +798,18 @@ webhooks:
        cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
```

+`ghz` uses reflection to identify which gRPC method to call. If you do not wish to enable reflection for your gRPC service, you can implement a standardized health check from the [grpc-proto](https://github.com/grpc/grpc-proto) library. To use this [health check schema](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto) without reflection, you can pass a parameter to `ghz` like this:
+
+```yaml
+webhooks:
+  - name: grpc-load-test-no-reflection
+    url: http://flagger-loadtester.test/
+    timeout: 5s
+    metadata:
+      type: cmd
+      cmd: "ghz --insecure --proto=/tmp/ghz/health.proto --call=grpc.health.v1.Health/Check podinfo.test:9898"
+```
+
The load tester can run arbitrary commands as long as the binary is present in the container image.
For example, if you want to replace `hey` with another CLI, you can create your own Docker image:
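The Dockerfile example itself falls outside this hunk's context. A minimal sketch of such an image, extending the released load tester and adding [bombardier](https://github.com/codesenberg/bombardier) as an illustrative replacement CLI (the version pin and download URL are assumptions, not taken from this compare):

```dockerfile
FROM weaveworks/flagger-loadtester:0.8.0

# The base image drops privileges to the app user, so switch back to
# root to install the extra binary, then restore the original user.
USER root
RUN curl -sSLo /usr/local/bin/bombardier \
    "https://github.com/codesenberg/bombardier/releases/download/v1.2.4/bombardier-linux-amd64" && \
    chmod +x /usr/local/bin/bombardier
USER app
```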
@@ -870,6 +882,20 @@ Now you can add pre-rollout webhooks to the canary analysis spec:

When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.

+If you are using Helm v3, you'll have to create a dedicated service account and add the release namespace to the test command:
+
+```yaml
+  canaryAnalysis:
+    webhooks:
+      - name: "smoke test"
+        type: pre-rollout
+        url: http://flagger-helmtester.kube-system/
+        timeout: 3m
+        metadata:
+          type: "helmv3"
+          cmd: "test run {{ .Release.Name }} --cleanup -n {{ .Release.Namespace }}"
+```
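The dedicated service account can come from the loadtester chart's new `rbac` values (added in this compare). A minimal sketch, with deliberately broad rules that you would narrow to whatever the tests actually touch:

```bash
cat <<'EOF' > helmtester-values.yaml
rbac:
  create: true
  scope: cluster
  rules:
    - apiGroups: ["*"]
      resources: ["*"]
      verbs: ["*"]
EOF

helm upgrade -i flagger-helmtester ./charts/loadtester \
  --namespace kube-system \
  -f helmtester-values.yaml
```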
As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.

```yaml
@@ -186,7 +186,7 @@ Install cert-manager's CRDs:

```bash
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager

-kubectl apply -f ${CERT_REPO}/release-0.7/deploy/manifests/00-crds.yaml
+kubectl apply -f ${CERT_REPO}/release-0.10/deploy/manifests/00-crds.yaml
```

Create the cert-manager namespace and disable resource validation:

@@ -204,7 +204,7 @@ helm repo add jetstack https://charts.jetstack.io && \
helm repo update && \
helm upgrade -i cert-manager \
  --namespace cert-manager \
- --version v0.7.0 \
+ --version v0.10.0 \
  jetstack/cert-manager
```

@@ -339,7 +339,7 @@ Find the GKE Istio version with:

kubectl -n istio-system get deploy istio-pilot -oyaml | grep image:
```

-Install Prometheus in istio-system namespace (replace `1.0.6-gke.3` with your version):
+Install Prometheus in istio-system namespace:

```bash
kubectl -n istio-system apply -f \
1  go.mod

@@ -56,6 +56,7 @@ require (
)

replace (
    git.apache.org/thrift.git => github.com/apache/thrift v0.12.0
+   github.com/google/uuid => github.com/google/uuid v1.0.0
    golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774
    golang.org/x/net => golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
@@ -33,6 +33,22 @@ spec:
    - name: Weight
      type: string
      JSONPath: .status.canaryWeight
+   - name: FailedChecks
+     type: string
+     JSONPath: .status.failedChecks
+     priority: 1
+   - name: Interval
+     type: string
+     JSONPath: .spec.canaryAnalysis.interval
+     priority: 1
+   - name: StepWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.stepWeight
+     priority: 1
+   - name: MaxWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.maxWeight
+     priority: 1
    - name: LastTransitionTime
      type: string
      JSONPath: .status.lastTransitionTime
@@ -54,7 +70,7 @@ spec:
              targetRef:
                description: Deployment selector
                type: object
-               required: ['apiVersion', 'kind', 'name']
+               required: ["apiVersion", "kind", "name"]
                properties:
                  apiVersion:
                    type: string
@@ -67,7 +83,7 @@ spec:
                anyOf:
                  - type: string
                  - type: object
-                   required: ['apiVersion', 'kind', 'name']
+                   required: ["apiVersion", "kind", "name"]
                    properties:
                      apiVersion:
                        type: string
@@ -80,7 +96,7 @@ spec:
                anyOf:
                  - type: string
                  - type: object
-                   required: ['apiVersion', 'kind', 'name']
+                   required: ["apiVersion", "kind", "name"]
                    properties:
                      apiVersion:
                        type: string
@@ -90,7 +106,7 @@ spec:
                    type: string
              service:
                type: object
-               required: ['port']
+               required: ["port"]
                properties:
                  port:
                    description: Container port number
@@ -108,7 +124,7 @@ spec:
                description: AppMesh backend array
                anyOf:
                  - type: string
                  - type: object
                  - type: array
              timeout:
                description: Istio HTTP or gRPC request timeout
                type: string
@@ -178,7 +194,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['name', 'threshold']
+                   required: ["name", "threshold"]
                    properties:
                      name:
                        description: Name of the Prometheus metric
@@ -199,7 +215,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['name', 'url', 'timeout']
+                   required: ["name", "url", "timeout"]
                    properties:
                      name:
                        description: Name of the webhook
@@ -262,7 +278,7 @@ spec:
                properties:
                  items:
                    type: object
-                   required: ['type', 'status', 'reason']
+                   required: ["type", "status", "reason"]
                    properties:
                      lastTransitionTime:
                        description: LastTransitionTime of this condition
@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
-  newTag: 0.18.2
+  newTag: 0.18.4
@@ -17,7 +17,7 @@ spec:
    spec:
      containers:
      - name: loadtester
-       image: weaveworks/flagger-loadtester:0.6.1
+       image: weaveworks/flagger-loadtester:0.8.0
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -82,7 +82,7 @@ func SetupMocks(abtest bool) Mocks {
    flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

    // init router
-   rf := router.NewFactory(nil, kubeClient, flaggerClient, logger, flaggerClient)
+   rf := router.NewFactory(nil, kubeClient, flaggerClient, "annotationsPrefix", logger, flaggerClient)

    // init observer
    observerFactory, _ := metrics.NewFactory("fake", "istio", 5*time.Second)
@@ -24,7 +24,7 @@ func (task *BashTask) Run(ctx context.Context) (bool, error) {

    if err != nil {
        task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
-       return false, fmt.Errorf(" %v %v", err, out)
+       return false, fmt.Errorf(" %v %s", err, out)
    } else {
        if task.logCmdOutput {
            fmt.Printf("%s\n", out)
@@ -20,14 +20,14 @@ func (task *HelmTask) Hash() string {
}

func (task *HelmTask) Run(ctx context.Context) (bool, error) {
-   helmCmd := fmt.Sprintf("helm %s", task.command)
+   helmCmd := fmt.Sprintf("%s %s", TaskTypeHelm, task.command)
    task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)

-   cmd := exec.CommandContext(ctx, "helm", strings.Fields(task.command)...)
+   cmd := exec.CommandContext(ctx, TaskTypeHelm, strings.Fields(task.command)...)
    out, err := cmd.CombinedOutput()
    if err != nil {
        task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
-       return false, fmt.Errorf(" %v %v", err, out)
+       return false, fmt.Errorf(" %v %s", err, out)
    } else {
        if task.logCmdOutput {
            fmt.Printf("%s\n", out)
42  pkg/loadtester/helmv3.go  Normal file

@@ -0,0 +1,42 @@
package loadtester

import (
    "context"
    "fmt"
    "os/exec"
    "strings"
)

const TaskTypeHelmv3 = "helmv3"

type HelmTaskv3 struct {
    TaskBase
    command      string
    logCmdOutput bool
}

func (task *HelmTaskv3) Hash() string {
    return hash(task.canary + task.command)
}

func (task *HelmTaskv3) Run(ctx context.Context) (bool, error) {
    helmCmd := fmt.Sprintf("%s %s", TaskTypeHelmv3, task.command)
    task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)

    cmd := exec.CommandContext(ctx, TaskTypeHelmv3, strings.Fields(task.command)...)
    out, err := cmd.CombinedOutput()
    if err != nil {
        task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
        return false, fmt.Errorf(" %v %s", err, out)
    } else {
        if task.logCmdOutput {
            fmt.Printf("%s\n", out)
        }
        task.logger.With("canary", task.canary).Infof("command finished %v", helmCmd)
    }
    return true, nil
}

func (task *HelmTaskv3) String() string {
    return task.command
}
@@ -185,6 +185,31 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogge
                return
            }

+           // run helmv3 command (blocking task)
+           if typ == TaskTypeHelmv3 {
+               helm := HelmTaskv3{
+                   command:      payload.Metadata["cmd"],
+                   logCmdOutput: true,
+                   TaskBase: TaskBase{
+                       canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
+                       logger: logger,
+                   },
+               }
+
+               ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
+               defer cancel()
+
+               ok, err := helm.Run(ctx)
+               if !ok {
+                   w.WriteHeader(http.StatusInternalServerError)
+                   w.Write([]byte(err.Error()))
+                   return
+               }
+
+               w.WriteHeader(http.StatusOK)
+               return
+           }
+
            taskFactory, ok := GetTaskFactory(typ)
            if !ok {
                w.WriteHeader(http.StatusBadRequest)
@@ -11,23 +11,26 @@ import (
)

type Factory struct {
    kubeConfig               *restclient.Config
    kubeClient               kubernetes.Interface
    meshClient               clientset.Interface
    flaggerClient            clientset.Interface
+   ingressAnnotationsPrefix string
    logger                   *zap.SugaredLogger
}

func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
    flaggerClient clientset.Interface,
+   ingressAnnotationsPrefix string,
    logger *zap.SugaredLogger,
    meshClient clientset.Interface) *Factory {
    return &Factory{
        kubeConfig:               kubeConfig,
        meshClient:               meshClient,
        kubeClient:               kubeClient,
        flaggerClient:            flaggerClient,
+       ingressAnnotationsPrefix: ingressAnnotationsPrefix,
        logger:                   logger,
    }
}

@@ -51,8 +54,9 @@ func (factory *Factory) MeshRouter(provider string) Interface {
        return &NopRouter{}
    case provider == "nginx":
        return &IngressRouter{
            logger:            factory.logger,
            kubeClient:        factory.kubeClient,
+           annotationsPrefix: factory.ingressAnnotationsPrefix,
        }
    case provider == "appmesh":
        return &AppMeshRouter{
@@ -2,6 +2,9 @@ package router

import (
    "fmt"
+   "strconv"
+   "strings"
+
    "github.com/google/go-cmp/cmp"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    "go.uber.org/zap"
@@ -10,13 +13,12 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes"
-   "strconv"
-   "strings"
)

type IngressRouter struct {
    kubeClient        kubernetes.Interface
+   annotationsPrefix string
    logger            *zap.SugaredLogger
}

func (i *IngressRouter) Reconcile(canary *flaggerv1.Canary) error {
@@ -115,7 +117,7 @@ func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (
    // A/B testing
    if len(canary.Spec.CanaryAnalysis.Match) > 0 {
        for k := range canaryIngress.Annotations {
-           if k == "nginx.ingress.kubernetes.io/canary-by-cookie" || k == "nginx.ingress.kubernetes.io/canary-by-header" {
+           if k == i.GetAnnotationWithPrefix("canary-by-cookie") || k == i.GetAnnotationWithPrefix("canary-by-header") {
                return 0, 100, nil
            }
        }
@@ -123,7 +125,7 @@ func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (

    // Canary
    for k, v := range canaryIngress.Annotations {
-       if k == "nginx.ingress.kubernetes.io/canary-weight" {
+       if k == i.GetAnnotationWithPrefix("canary-weight") {
            val, err := strconv.Atoi(v)
            if err != nil {
                return 0, 0, err
@@ -170,12 +172,12 @@ func (i *IngressRouter) SetRoutes(
        iClone.Annotations = i.makeHeaderAnnotations(iClone.Annotations, header, headerValue, cookie)
    } else {
        // canary
-       iClone.Annotations["nginx.ingress.kubernetes.io/canary-weight"] = fmt.Sprintf("%v", canaryWeight)
+       iClone.Annotations[i.GetAnnotationWithPrefix("canary-weight")] = fmt.Sprintf("%v", canaryWeight)
    }

    // toggle canary
    if canaryWeight > 0 {
-       iClone.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
+       iClone.Annotations[i.GetAnnotationWithPrefix("canary")] = "true"
    } else {
        iClone.Annotations = i.makeAnnotations(iClone.Annotations)
    }
@@ -191,14 +193,14 @@ func (i *IngressRouter) SetRoutes(
func (i *IngressRouter) makeAnnotations(annotations map[string]string) map[string]string {
    res := make(map[string]string)
    for k, v := range annotations {
-       if !strings.Contains(k, "nginx.ingress.kubernetes.io/canary") &&
+       if !strings.Contains(k, i.GetAnnotationWithPrefix("canary")) &&
            !strings.Contains(k, "kubectl.kubernetes.io/last-applied-configuration") {
            res[k] = v
        }
    }

-   res["nginx.ingress.kubernetes.io/canary"] = "false"
-   res["nginx.ingress.kubernetes.io/canary-weight"] = "0"
+   res[i.GetAnnotationWithPrefix("canary")] = "false"
+   res[i.GetAnnotationWithPrefix("canary-weight")] = "0"

    return res
}
@@ -207,25 +209,29 @@ func (i *IngressRouter) makeHeaderAnnotations(annotations map[string]string,
    header string, headerValue string, cookie string) map[string]string {
    res := make(map[string]string)
    for k, v := range annotations {
-       if !strings.Contains(v, "nginx.ingress.kubernetes.io/canary") {
+       if !strings.Contains(v, i.GetAnnotationWithPrefix("canary")) {
            res[k] = v
        }
    }

-   res["nginx.ingress.kubernetes.io/canary"] = "true"
-   res["nginx.ingress.kubernetes.io/canary-weight"] = "0"
+   res[i.GetAnnotationWithPrefix("canary")] = "true"
+   res[i.GetAnnotationWithPrefix("canary-weight")] = "0"

    if cookie != "" {
-       res["nginx.ingress.kubernetes.io/canary-by-cookie"] = cookie
+       res[i.GetAnnotationWithPrefix("canary-by-cookie")] = cookie
    }

    if header != "" {
-       res["nginx.ingress.kubernetes.io/canary-by-header"] = header
+       res[i.GetAnnotationWithPrefix("canary-by-header")] = header
    }

    if headerValue != "" {
-       res["nginx.ingress.kubernetes.io/canary-by-header-value"] = headerValue
+       res[i.GetAnnotationWithPrefix("canary-by-header-value")] = headerValue
    }

    return res
}

func (i *IngressRouter) GetAnnotationWithPrefix(suffix string) string {
    return fmt.Sprintf("%v/%v", i.annotationsPrefix, suffix)
}
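The helper above simply joins prefix and suffix, so with `annotationsPrefix` set to `custom.ingress.kubernetes.io` and a canary weight of 10, `SetRoutes` would leave annotations like these on the generated canary ingress (an illustrative sketch, not output captured from a cluster):

```yaml
metadata:
  annotations:
    custom.ingress.kubernetes.io/canary: "true"
    custom.ingress.kubernetes.io/canary-weight: "10"
```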
@@ -2,15 +2,17 @@ package router

import (
    "fmt"
-   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "testing"
+
+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestIngressRouter_Reconcile(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:            mocks.logger,
        kubeClient:        mocks.kubeClient,
+       annotationsPrefix: "custom.ingress.kubernetes.io",
    }

    err := router.Reconcile(mocks.ingressCanary)
@@ -18,8 +20,8 @@ func TestIngressRouter_Reconcile(t *testing.T) {
        t.Fatal(err.Error())
    }

-   canaryAn := "nginx.ingress.kubernetes.io/canary"
-   canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"
+   canaryAn := "custom.ingress.kubernetes.io/canary"
+   canaryWeightAn := "custom.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
@@ -44,8 +46,9 @@ func TestIngressRouter_Reconcile(t *testing.T) {
func TestIngressRouter_GetSetRoutes(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:            mocks.logger,
        kubeClient:        mocks.kubeClient,
+       annotationsPrefix: "prefix1.nginx.ingress.kubernetes.io",
    }

    err := router.Reconcile(mocks.ingressCanary)
@@ -66,8 +69,8 @@ func TestIngressRouter_GetSetRoutes(t *testing.T) {
        t.Fatal(err.Error())
    }

-   canaryAn := "nginx.ingress.kubernetes.io/canary"
-   canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"
+   canaryAn := "prefix1.nginx.ingress.kubernetes.io/canary"
+   canaryWeightAn := "prefix1.nginx.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
@@ -1,4 +1,4 @@
package version

-var VERSION = "0.18.2"
+var VERSION = "0.18.4"
var REVISION = "unknown"
@@ -25,14 +25,17 @@ The e2e testing infrastructure is powered by CircleCI and [Kubernetes Kind](http
* install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
* install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
* create local Kubernetes cluster with kind [e2e-kind.sh](e2e-kind.sh)
-* install latest stable Helm CLI [e2e-nginx.sh](e2e-istio.sh)
-* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-istio.sh)
-* install NGINX ingress with Helm [e2e-nginx.sh](e2e-istio.sh)
+* install latest stable Helm CLI [e2e-nginx.sh](e2e-nginx.sh)
+* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-nginx.sh)
+* install NGINX ingress with Helm [e2e-nginx.sh](e2e-nginx.sh)
* load Flagger image onto the local cluster [e2e-nginx.sh](e2e-nginx.sh)
* install Flagger and Prometheus in the ingress-nginx namespace [e2e-nginx.sh](e2e-nginx.sh)
-* create a test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the canary initialization [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-tests.sh)
+* create a test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the canary initialization [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* cleanup test environment [e2e-nginx-cleanup.sh](e2e-nginx-cleanup.sh)
+* install NGINX Ingress and Flagger with custom ingress annotations prefix [e2e-nginx-custom-annotations.sh](e2e-nginx-custom-annotations.sh)
+* repeat the canary and A/B testing workflow [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
14  test/e2e-nginx-cleanup.sh  Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash

set -o errexit

export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Deleting NGINX Ingress'
helm delete --purge nginx-ingress

echo '>>> Deleting Flagger'
helm delete --purge flagger

echo '>>> Cleanup test namespace'
kubectl delete namespace test --ignore-not-found=true
41  test/e2e-nginx-custom-annotations.sh  Executable file

@@ -0,0 +1,41 @@
#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
NGINX_VERSION=1.12.1

echo '>>> Installing NGINX Ingress'
helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \
--wait \
--namespace ingress-nginx \
--set controller.stats.enabled=true \
--set controller.metrics.enabled=true \
--set controller.podAnnotations."prometheus\.io/scrape"=true \
--set controller.podAnnotations."prometheus\.io/port"=10254 \
--set controller.service.type=NodePort

kubectl -n ingress-nginx patch deployment/nginx-ingress-controller \
--type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--annotations-prefix=custom.ingress.kubernetes.io"}]'

kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
kubectl -n ingress-nginx get all

echo '>>> Loading Flagger image'
kind load docker-image test/flagger:latest

echo '>>> Installing Flagger'
helm install ${REPO_ROOT}/charts/flagger \
--name flagger \
--namespace ingress-nginx \
--set prometheus.install=true \
--set ingressAnnotationsPrefix="custom.ingress.kubernetes.io" \
--set meshProvider=nginx \
--set crd.create=false

kubectl -n ingress-nginx set image deployment/flagger flagger=test/flagger:latest

kubectl -n ingress-nginx rollout status deployment/flagger
kubectl -n ingress-nginx rollout status deployment/flagger-prometheus
@@ -43,9 +43,31 @@ spec:
    maxWeight: 30
    stepWeight: 10
    metrics:
-   - name: request-success-rate
+   - name: "http-request-success-rate"
      threshold: 99
      interval: 1m
+     query: |
+       100 - sum(
+           rate(
+               http_request_duration_seconds_count{
+                 kubernetes_namespace="test",
+                 kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                 path="root",
+                 status!~"5.*"
+               }[1m]
+           )
+       )
+       /
+       sum(
+           rate(
+               http_request_duration_seconds_count{
+                 kubernetes_namespace="test",
+                 kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                 path="root"
+               }[1m]
+           )
+       )
+       * 100
    - name: "latency"
      threshold: 0.5
      interval: 1m
@@ -55,7 +77,8 @@ spec:
          rate(
            http_request_duration_seconds_bucket{
              kubernetes_namespace="test",
-             kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
+             kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+             path="root"
            }[1m]
          )
        ) by (le)
@@ -94,7 +117,14 @@ echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
+failed=false
until ${ok}; do
+  kubectl -n test get canary/podinfo | grep 'Failed' && failed=true || failed=false
+  if ${failed}; then
+    kubectl -n ingress-nginx logs deployment/test-flagger
+    echo "Canary failed!"
+    exit 1
+  fi
  kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
  sleep 10
  kubectl -n ingress-nginx logs deployment/flagger --tail 1
@@ -144,9 +174,31 @@ spec:
      cookie:
        exact: "canary"
    metrics:
-   - name: request-success-rate
+   - name: "http-request-success-rate"
      threshold: 99
      interval: 1m
+     query: |
+       100 - sum(
+           rate(
+               http_request_duration_seconds_count{
+                 kubernetes_namespace="test",
+                 kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                 path="root",
+                 status!~"5.*"
+               }[1m]
+           )
+       )
+       /
+       sum(
+           rate(
+               http_request_duration_seconds_count{
+                 kubernetes_namespace="test",
+                 kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                 path="root"
+               }[1m]
+           )
+       )
+       * 100
    webhooks:
    - name: pre
      type: pre-rollout