Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-16 10:59:53 +00:00
Compare commits
17 Commits
| SHA1 |
|---|
| 1902884b56 |
| 98d2805267 |
| 24a74d3589 |
| 15463456ec |
| 752eceed4b |
| eadce34d6f |
| 11ccf34bbc |
| e308678ed5 |
| cbe72f0aa2 |
| bc84e1c154 |
| 344bd45a0e |
| 72014f736f |
| 0a2949b6ad |
| 2ff695ecfe |
| 8d0b54e059 |
| 121a65fad0 |
| ecaa203091 |
@@ -1,6 +1,6 @@
 version: 2.1
 jobs:
-  e2e-testing:
+  e2e-istio-testing:
     machine: true
     steps:
       - checkout
@@ -18,11 +18,20 @@ jobs:
       - run: test/e2e-build.sh supergloo:test.supergloo-system
       - run: test/e2e-tests.sh canary

+  e2e-nginx-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-nginx.sh
+      - run: test/e2e-nginx-build.sh
+      - run: test/e2e-nginx-tests.sh
+
 workflows:
   version: 2
   build-and-test:
     jobs:
-      - e2e-testing:
+      - e2e-istio-testing:
           filters:
             branches:
               ignore:
@@ -36,3 +45,10 @@ workflows:
                 - /gh-pages.*/
                 - /docs-.*/
                 - /release-.*/
+      - e2e-nginx-testing:
+          filters:
+            branches:
+              ignore:
+                - /gh-pages.*/
+                - /docs-.*/
+                - /release-.*/
CHANGELOG.md (20 changed lines)
@@ -2,6 +2,26 @@

 All notable changes to this project are documented in this file.

+## 0.13.2 (2019-04-11)
+
+Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)
+
+#### Fixes
+
+- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)
+
+#### Improvements
+
+- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)
+
+## 0.13.1 (2019-04-09)
+
+Fixes for custom metrics checks and NGINX Prometheus queries
+
+#### Fixes
+
+- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)
+
 ## 0.13.0 (2019-04-08)

 Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delivery) ingress controller
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: weaveworks/flagger:0.13.0
+          image: weaveworks/flagger:0.13.2
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
@@ -43,11 +43,20 @@ spec:
       # percentage (0-100)
       threshold: 99
       interval: 1m
-    - name: request-duration
-      # maximum avg req duration
-      # milliseconds
-      threshold: 500
+    - name: "latency"
+      threshold: 0.5
       interval: 1m
+      query: |
+        histogram_quantile(0.99,
+          sum(
+            rate(
+              http_request_duration_seconds_bucket{
+                kubernetes_namespace="test",
+                kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
+              }[1m]
+            )
+          ) by (le)
+        )
     # external checks (optional)
     webhooks:
     - name: load-test
@@ -9,7 +9,7 @@ metadata:
     kubernetes.io/ingress.class: "nginx"
 spec:
   rules:
-    - host: app.exmaple.com
+    - host: app.example.com
       http:
         paths:
           - backend:
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 0.13.0
-appVersion: 0.13.0
+version: 0.13.2
+appVersion: 0.13.2
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
@@ -2,7 +2,7 @@

 image:
   repository: weaveworks/flagger
-  tag: 0.13.0
+  tag: 0.13.2
   pullPolicy: IfNotPresent

 metricsServer: "http://prometheus:9090"
@@ -14,7 +14,9 @@ Install NGINX with Helm:
 helm upgrade -i nginx-ingress stable/nginx-ingress \
 --namespace ingress-nginx \
 --set controller.stats.enabled=true \
---set controller.metrics.enabled=true
+--set controller.metrics.enabled=true \
+--set controller.podAnnotations."prometheus\.io/scrape"=true \
+--set controller.podAnnotations."prometheus\.io/port"=10254
 ```

 Install Flagger and the Prometheus add-on in the same namespace as NGINX:
@@ -65,7 +67,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
 --namespace=test
 ```

-Create an ingress definition (replace `app.exmaple.com` with your own domain):
+Create an ingress definition (replace `app.example.com` with your own domain):

 ```yaml
 apiVersion: extensions/v1beta1
@@ -79,7 +81,7 @@ metadata:
     kubernetes.io/ingress.class: "nginx"
 spec:
   rules:
-    - host: app.exmaple.com
+    - host: app.example.com
       http:
         paths:
           - backend:
@@ -93,7 +95,7 @@ Save the above resource as podinfo-ingress.yaml and then apply it:
 kubectl apply -f ./podinfo-ingress.yaml
 ```

-Create a canary custom resource (replace `app.exmaple.com` with your own domain):
+Create a canary custom resource (replace `app.example.com` with your own domain):

 ```yaml
 apiVersion: flagger.app/v1alpha3
@@ -247,7 +249,7 @@ podinfod=quay.io/stefanprodan/podinfo:1.4.2
 Generate HTTP 500 errors:

 ```bash
-watch curl http://app.exmaple.com/status/500
+watch curl http://app.example.com/status/500
 ```

 When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
@@ -276,6 +278,70 @@ Events:
   Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
 ```

+### Custom metrics
+
+The canary analysis can be extended with Prometheus queries.
+
+The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
+histogram to validate the canary.
+
+Edit the canary analysis and add the following metric:
+
+```yaml
+  canaryAnalysis:
+    metrics:
+    - name: "latency"
+      threshold: 0.5
+      interval: 1m
+      query: |
+        histogram_quantile(0.99,
+          sum(
+            rate(
+              http_request_duration_seconds_bucket{
+                kubernetes_namespace="test",
+                kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
+              }[1m]
+            )
+          ) by (le)
+        )
+```
+
+The threshold is set to 500ms so if the average request duration in the last minute
+goes over half a second then the analysis will fail and the canary will not be promoted.
+
+Trigger a canary deployment by updating the container image:
+
+```bash
+kubectl -n test set image deployment/podinfo \
+podinfod=quay.io/stefanprodan/podinfo:1.4.3
+```
+
+Generate high response latency:
+
+```bash
+watch curl http://app.exmaple.com/delay/2
+```
+
+Watch Flagger logs:
+
+```
+kubectl -n nginx-ingress logs deployment/flagger -f | jq .msg
+
+Starting canary deployment for podinfo.test
+Advance podinfo.test canary weight 5
+Advance podinfo.test canary weight 10
+Advance podinfo.test canary weight 15
+Halt podinfo.test advancement latency 1.20 > 0.5
+Halt podinfo.test advancement latency 1.45 > 0.5
+Halt podinfo.test advancement latency 1.60 > 0.5
+Halt podinfo.test advancement latency 1.69 > 0.5
+Halt podinfo.test advancement latency 1.70 > 0.5
+Rolling back podinfo.test failed checks threshold reached 5
+Canary failed! Scaling down podinfo.test
+```
+
+If you have Slack configured, Flagger will send a notification with the reason why the canary failed.
+
 ### A/B Testing

 Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
@@ -315,7 +381,7 @@ Edit the canary analysis, remove the max/step weight and add the match condition
 ```

 The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
-those that call the service using the `X-Canary: always` header.
+those that call the service using the `X-Canary: insider` header.

 Trigger a canary deployment by updating the container image:

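The match-conditions block referenced in the hunk above is not captured in this compare view. As a rough sketch only, an A/B analysis of the kind the docs describe would look something like the snippet below, reusing the header/cookie matchers from the NGINX e2e test further down; the interval and iteration values are illustrative, not taken from the documentation:

```yaml
  canaryAnalysis:
    # run a fixed number of iterations instead of weighted steps
    interval: 1m
    threshold: 5
    iterations: 10
    # route only matching requests to the canary
    match:
    - headers:
        x-canary:
          exact: "insider"
    - headers:
        cookie:
          exact: "canary"
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
```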
@@ -214,7 +214,6 @@ func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, error)
 	primaryDep = &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      primaryName,
-			Labels:    canaryDep.Labels,
 			Namespace: cd.Namespace,
 			OwnerReferences: []metav1.OwnerReference{
 				*metav1.NewControllerRef(cd, schema.GroupVersionKind{
@@ -9,13 +9,13 @@ import (

 const nginxSuccessRateQuery = `
 sum(rate(
-nginx_ingress_controller_requests{kubernetes_namespace="{{ .Namespace }}",
+nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
 ingress="{{ .Name }}",
 status!~"5.*"}
 [{{ .Interval }}]))
 /
 sum(rate(
-nginx_ingress_controller_requests{kubernetes_namespace="{{ .Namespace }}",
+nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
 ingress="{{ .Name }}"}
 [{{ .Interval }}]))
 * 100
@@ -68,10 +68,10 @@ func (c *Observer) GetNginxSuccessRate(name string, namespace string, metric str

 const nginxRequestDurationQuery = `
 sum(rate(
-nginx_ingress_controller_ingress_upstream_latency_seconds_sum{kubernetes_namespace="{{ .Namespace }}",
+nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{ .Namespace }}",
 ingress="{{ .Name }}"}[{{ .Interval }}]))
 /
-sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{kubernetes_namespace="{{ .Namespace }}",
+sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{ .Namespace }}",
 ingress="{{ .Name }}"}[{{ .Interval }}])) * 1000
 `
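For orientation, here is a minimal sketch of how a Go text/template query like nginxSuccessRateQuery above can be rendered. The field names (Namespace, Name, Interval) come from the template placeholders; the struct literal and the whitespace handling are illustrative, not Flagger's actual rendering code:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// A copy of the success-rate template shown in the diff above.
const successRateQuery = `
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}",
status!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}
[{{ .Interval }}]))
* 100`

func main() {
	// Hypothetical model carrying the values substituted into the placeholders.
	model := struct {
		Namespace, Name, Interval string
	}{"nginx", "podinfo", "1m"}

	t := template.Must(template.New("success-rate").Parse(successRateQuery))

	var buf bytes.Buffer
	if err := t.Execute(&buf, model); err != nil {
		panic(err)
	}

	// Collapse the multi-line template into a single-line PromQL expression,
	// similar in spirit to the one-line expected strings in the unit tests below.
	query := strings.Join(strings.Fields(buf.String()), " ")
	fmt.Println(query)
}
```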
@@ -20,7 +20,7 @@ func Test_NginxSuccessRateQueryRender(t *testing.T) {
 		t.Fatal(err)
 	}

-	expected := `sum(rate(nginx_ingress_controller_requests{kubernetes_namespace="nginx",ingress="podinfo",status!~"5.*"}[1m])) / sum(rate(nginx_ingress_controller_requests{kubernetes_namespace="nginx",ingress="podinfo"}[1m])) * 100`
+	expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m])) / sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m])) * 100`

 	if query != expected {
 		t.Errorf("\nGot %s \nWanted %s", query, expected)
@@ -43,7 +43,7 @@ func Test_NginxRequestDurationQueryRender(t *testing.T) {
 		t.Fatal(err)
 	}

-	expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{kubernetes_namespace="nginx",ingress="podinfo"}[1m])) /sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{kubernetes_namespace="nginx",ingress="podinfo"}[1m])) * 1000`
+	expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m])) /sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m])) * 1000`

 	if query != expected {
 		t.Errorf("\nGot %s \nWanted %s", query, expected)
@@ -99,7 +99,9 @@ func (c *Observer) GetScalar(query string) (float64, error) {
 	query = strings.Replace(query, " ", "", -1)

 	var value *float64
-	result, err := c.queryMetric(query)
+
+	querySt := url.QueryEscape(query)
+	result, err := c.queryMetric(querySt)
 	if err != nil {
 		return 0, err
 	}
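The escaping change above matters because a raw PromQL expression contains characters (braces, quotes, commas) that are not safe inside a URL query string. A small standalone sketch of the idea, using the standard Prometheus HTTP API endpoint rather than Flagger's internal client:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Braces, quotes and commas in PromQL must be percent-encoded
	// before the query is placed into a URL query string.
	promql := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m])) * 100`

	escaped := url.QueryEscape(promql)

	// e.g. http://prometheus:9090/api/v1/query?query=sum%28rate%28nginx_ingress...
	fmt.Printf("http://prometheus:9090/api/v1/query?query=%s\n", escaped)
}
```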
pkg/router/ingress_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package router

import (
	"fmt"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
)

func TestIngressRouter_Reconcile(t *testing.T) {
	mocks := setupfakeClients()
	router := &IngressRouter{
		logger:     mocks.logger,
		kubeClient: mocks.kubeClient,
	}

	err := router.Reconcile(mocks.ingressCanary)
	if err != nil {
		t.Fatal(err.Error())
	}

	canaryAn := "nginx.ingress.kubernetes.io/canary"
	canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

	canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
	inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	if _, ok := inCanary.Annotations[canaryAn]; !ok {
		t.Errorf("Canary annotation missing")
	}

	// test initialisation
	if inCanary.Annotations[canaryAn] != "false" {
		t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
	}

	if inCanary.Annotations[canaryWeightAn] != "0" {
		t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
	}
}

func TestIngressRouter_GetSetRoutes(t *testing.T) {
	mocks := setupfakeClients()
	router := &IngressRouter{
		logger:     mocks.logger,
		kubeClient: mocks.kubeClient,
	}

	err := router.Reconcile(mocks.ingressCanary)
	if err != nil {
		t.Fatal(err.Error())
	}

	p, c, err := router.GetRoutes(mocks.ingressCanary)
	if err != nil {
		t.Fatal(err.Error())
	}

	p = 50
	c = 50

	err = router.SetRoutes(mocks.ingressCanary, p, c)
	if err != nil {
		t.Fatal(err.Error())
	}

	canaryAn := "nginx.ingress.kubernetes.io/canary"
	canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

	canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
	inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	if _, ok := inCanary.Annotations[canaryAn]; !ok {
		t.Errorf("Canary annotation missing")
	}

	// test rollout
	if inCanary.Annotations[canaryAn] != "true" {
		t.Errorf("Got canary annotation %v wanted true", inCanary.Annotations[canaryAn])
	}

	if inCanary.Annotations[canaryWeightAn] != "50" {
		t.Errorf("Got canary weight annotation %v wanted 50", inCanary.Annotations[canaryWeightAn])
	}

	p = 100
	c = 0

	err = router.SetRoutes(mocks.ingressCanary, p, c)
	if err != nil {
		t.Fatal(err.Error())
	}

	inCanary, err = router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	// test promotion
	if inCanary.Annotations[canaryAn] != "false" {
		t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
	}

	if inCanary.Annotations[canaryWeightAn] != "0" {
		t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
	}
}
@@ -11,7 +11,9 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	hpav1 "k8s.io/api/autoscaling/v1"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 )
@@ -20,6 +22,7 @@ type fakeClients struct {
 	canary        *v1alpha3.Canary
 	abtest        *v1alpha3.Canary
 	appmeshCanary *v1alpha3.Canary
+	ingressCanary *v1alpha3.Canary
 	kubeClient    kubernetes.Interface
 	meshClient    clientset.Interface
 	flaggerClient clientset.Interface
@@ -30,9 +33,10 @@ func setupfakeClients() fakeClients {
 	canary := newMockCanary()
 	abtest := newMockABTest()
 	appmeshCanary := newMockCanaryAppMesh()
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary)
+	ingressCanary := newMockCanaryIngress()
+	flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary, ingressCanary)

-	kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment())
+	kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment(), newMockIngress())

 	meshClient := fakeFlagger.NewSimpleClientset()
 	logger, _ := logger.NewLogger("debug")
@@ -41,6 +45,7 @@ func setupfakeClients() fakeClients {
 		canary:        canary,
 		abtest:        abtest,
 		appmeshCanary: appmeshCanary,
+		ingressCanary: ingressCanary,
 		kubeClient:    kubeClient,
 		meshClient:    meshClient,
 		flaggerClient: flaggerClient,
@@ -266,3 +271,73 @@ func newMockABTestDeployment() *appsv1.Deployment {

 	return d
 }
+
+func newMockCanaryIngress() *v1alpha3.Canary {
+	cd := &v1alpha3.Canary{
+		TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "nginx",
+		},
+		Spec: v1alpha3.CanarySpec{
+			TargetRef: hpav1.CrossVersionObjectReference{
+				Name:       "podinfo",
+				APIVersion: "apps/v1",
+				Kind:       "Deployment",
+			},
+			IngressRef: &hpav1.CrossVersionObjectReference{
+				Name:       "podinfo",
+				APIVersion: "extensions/v1beta1",
+				Kind:       "Ingress",
+			},
+			Service: v1alpha3.CanaryService{
+				Port: 9898,
+			}, CanaryAnalysis: v1alpha3.CanaryAnalysis{
+				Threshold:  10,
+				StepWeight: 10,
+				MaxWeight:  50,
+				Metrics: []v1alpha3.CanaryMetric{
+					{
+						Name:      "request-success-rate",
+						Threshold: 99,
+						Interval:  "1m",
+					},
+				},
+			},
+		},
+	}
+	return cd
+}
+
+func newMockIngress() *v1beta1.Ingress {
+	return &v1beta1.Ingress{
+		TypeMeta: metav1.TypeMeta{APIVersion: v1beta1.SchemeGroupVersion.String()},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "podinfo",
+			Annotations: map[string]string{
+				"kubernetes.io/ingress.class": "nginx",
+			},
+		},
+		Spec: v1beta1.IngressSpec{
+			Rules: []v1beta1.IngressRule{
+				{
+					Host: "app.example.com",
+					IngressRuleValue: v1beta1.IngressRuleValue{
+						HTTP: &v1beta1.HTTPIngressRuleValue{
+							Paths: []v1beta1.HTTPIngressPath{
+								{
+									Path: "/",
+									Backend: v1beta1.IngressBackend{
+										ServiceName: "podinfo",
+										ServicePort: intstr.FromInt(9898),
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
@@ -1,4 +1,4 @@
 package version

-var VERSION = "0.13.0"
+var VERSION = "0.13.2"
 var REVISION = "unknown"
@@ -1,4 +0,0 @@
-FROM golang:1.11
-
-RUN go get -u sigs.k8s.io/kind
-
@@ -2,7 +2,7 @@

 The e2e testing infrastructure is powered by CircleCI and [Kubernetes Kind](https://github.com/kubernetes-sigs/kind).

-CircleCI e2e workflow:
+### CircleCI e2e Istio workflow

 * install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
 * install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
@@ -21,4 +21,20 @@ CircleCI e2e workflow:
 * test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-tests.sh](e2e-tests.sh)
 * test the A/B testing analysis and promotion using cookies filters and pre/post rollout webhooks [e2e-tests.sh](e2e-tests.sh)

+### CircleCI e2e NGINX ingress workflow
+
+* install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
+* install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
+* create local Kubernetes cluster with kind [e2e-kind.sh](e2e-kind.sh)
+* install latest stable Helm CLI [e2e-nginx.sh](e2e-istio.sh)
+* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-istio.sh)
+* install NGINX ingress with Helm [e2e-nginx.sh](e2e-istio.sh)
+* build Flagger container image [e2e-nginx-build.sh](e2e-build.sh)
+* load Flagger image onto the local cluster [e2e-nginx-build.sh](e2e-build.sh)
+* install Flagger and Prometheus in the ingress-nginx namespace [e2e-nginx-build.sh](e2e-build.sh)
+* create a test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
+* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
+* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
+* test the canary initialization [e2e-nginx-tests.sh](e2e-tests.sh)
+* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-tests.sh)
+* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-tests.sh)
test/e2e-ingress.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo
              servicePort: 9898
test/e2e-nginx-build.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Building Flagger'
cd ${REPO_ROOT} && docker build -t test/flagger:latest . -f Dockerfile

echo '>>> Installing Flagger'
kind load docker-image test/flagger:latest

echo '>>> Installing Flagger'
helm upgrade -i flagger ${REPO_ROOT}/charts/flagger \
--wait \
--namespace ingress-nginx \
--set prometheus.install=true \
--set meshProvider=nginx

kubectl -n ingress-nginx set image deployment/flagger flagger=test/flagger:latest

kubectl -n ingress-nginx rollout status deployment/flagger
kubectl -n ingress-nginx rollout status deployment/flagger-prometheus
test/e2e-nginx-tests.sh (new executable file, 194 lines)
@@ -0,0 +1,194 @@
#!/usr/bin/env bash

# This script runs e2e tests for Canary initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Helm and NGINX ingress controller

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Creating test namespace'
kubectl create namespace test

echo '>>> Installing load tester'
kubectl -n test apply -f ${REPO_ROOT}/artifacts/loadtester/
kubectl -n test rollout status deployment/flagger-loadtester

echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
kubectl apply -f ${REPO_ROOT}/test/e2e-ingress.yaml

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 15s
    threshold: 15
    maxWeight: 30
    stepWeight: 10
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: "latency"
      threshold: 0.5
      interval: 1m
      query: |
        histogram_quantile(0.99,
          sum(
            rate(
              http_request_duration_seconds_bucket{
                kubernetes_namespace="test",
                kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
              }[1m]
            )
          ) by (le)
        )
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
EOF

echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
  kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
  sleep 5
  count=$(($count + 1))
  if [[ ${count} -eq ${retries} ]]; then
    kubectl -n ingress-nginx logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done

echo '✔ Canary initialization test passed'

echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.1

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
  kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
  sleep 10
  kubectl -n ingress-nginx logs deployment/flagger --tail 1
  count=$(($count + 1))
  if [[ ${count} -eq ${retries} ]]; then
    kubectl -n test describe deployment/podinfo
    kubectl -n test describe deployment/podinfo-primary
    kubectl -n ingress-nginx logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done

echo '✔ Canary promotion test passed'

if [ "$1" = "canary" ]; then
  exit 0
fi

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 10s
    threshold: 5
    iterations: 5
    match:
    - headers:
        x-canary:
          exact: "insider"
    - headers:
        cookie:
          exact: "canary"
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    webhooks:
    - name: pre
      type: pre-rollout
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 10m -q 10 -c 2 -H 'X-Canary: insider' -host app.example.com http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
    - name: post
      type: post-rollout
      url: http://flagger-loadtester.test/
      timeout: 15s
      metadata:
        type: cmd
        cmd: "curl -sH 'Host: app.example.com' http://nginx-ingress-controller.ingress-nginx"
        logCmdOutput: "true"
EOF

echo '>>> Triggering A/B testing'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.2

echo '>>> Waiting for A/B testing promotion'
retries=50
count=0
ok=false
until ${ok}; do
  kubectl -n test describe deployment/podinfo-primary | grep '1.4.2' && ok=true || ok=false
  sleep 10
  kubectl -n ingress-nginx logs deployment/flagger --tail 1
  count=$(($count + 1))
  if [[ ${count} -eq ${retries} ]]; then
    kubectl -n test describe deployment/podinfo
    kubectl -n test describe deployment/podinfo-primary
    kubectl -n ingress-nginx logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done

echo '✔ A/B testing promotion test passed'

kubectl -n ingress-nginx logs deployment/flagger

echo '✔ All tests passed'
test/e2e-nginx.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo ">>> Installing Helm"
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash

echo '>>> Installing Tiller'
kubectl --namespace kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller --upgrade --wait

echo '>>> Installing NGINX Ingress'
helm upgrade -i nginx-ingress stable/nginx-ingress \
--wait \
--namespace ingress-nginx \
--set controller.stats.enabled=true \
--set controller.metrics.enabled=true \
--set controller.podAnnotations."prometheus\.io/scrape"=true \
--set controller.podAnnotations."prometheus\.io/port"=10254 \
--set controller.service.type=NodePort

kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
kubectl -n ingress-nginx get all