Compare commits


23 Commits

Author SHA1 Message Date
Stefan Prodan
f6fa5e3891 Merge pull request #270 from weaveworks/prep-0.18.2
Release v0.18.2
2019-08-05 18:57:54 +03:00
stefanprodan
a305a0b705 Release v0.18.2 2019-08-05 18:43:57 +03:00
Stefan Prodan
dfe619e2ea Merge pull request #269 from weaveworks/helm-circleci
Publish Helm chart from CircleCI
2019-08-05 17:57:21 +03:00
stefanprodan
2b3d425b70 Publish Helm chart from CircleCI 2019-08-05 17:08:33 +03:00
Stefan Prodan
6e55fea413 Merge pull request #268 from weaveworks/istio-1.2.3
Update e2e backends
2019-08-03 15:44:53 +03:00
stefanprodan
b6a08b6615 Fix AppMesh mesh name in docs 2019-08-03 15:24:31 +03:00
stefanprodan
eaa6906516 Update e2e NGINX ingress to v1.12.1 2019-08-03 13:42:27 +03:00
stefanprodan
62a7a92f2a Update e2e Gloo to v0.18.8 2019-08-03 13:01:57 +03:00
stefanprodan
3aeb0945c5 Update e2e Istio to v1.2.3 2019-08-03 12:05:21 +03:00
Stefan Prodan
e8c85efeae Merge pull request #267 from fcantournet/fix_virtualservices_multipleports
Fix Port discovery with multiple port services
2019-08-03 12:04:04 +03:00
Félix Cantournet
6651f6452b Multiple port canary: fix FAQ and add e2e tests 2019-08-02 14:23:58 +02:00
Félix Cantournet
0ca48d77be Fix Port discovery with multiple port services
This fixes issue https://github.com/weaveworks/flagger/issues/263

We actually don't need to specify any ports in the VirtualService
and DestinationRules.
Istio will create clusters/listeners for each named port we have declared in
the Kubernetes services, and the router can be shared as it operates only on L7 criteria.

Also contains a tiny clean-up of imports
2019-08-02 10:07:00 +02:00
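A minimal sketch of the usage this fix enables (field values and the podinfo names are assumptions taken from the examples in this changeset, not part of the commit itself): with `portDiscovery: true`, the extra named container ports are copied to the generated ClusterIP services, and the VirtualService/DestinationRule destinations no longer need a port selector.

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo            # container exposes e.g. 9898 (http) and 9797 (http-prom)
  service:
    port: 9898               # port used by the generated routes
    portDiscovery: true      # copy the other named container ports to the ClusterIP services
  canaryAnalysis:
    interval: 30s
    threshold: 5
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
```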
Stefan Prodan
a9e0e018e3 Merge pull request #266 from ExpediaInc/master
Parameterize image pull secrets for private docker repos
2019-08-01 11:07:53 +03:00
Sky Moon
122d11f445 Merge pull request #1 from ExpediaInc/parameterizeImagePullSecrets
parameterize image pull secrets for private docker repos.
2019-08-01 00:50:15 -07:00
cmoon
b03555858c parameterize image pull secrets for private docker repos. 2019-08-01 00:47:07 -07:00
Stefan Prodan
dcc5a40441 Merge pull request #262 from weaveworks/prep-0.18.1
Release v0.18.1
2019-07-30 13:52:25 +03:00
stefanprodan
8c949f59de Package helm charts locally 2019-07-30 13:35:09 +03:00
stefanprodan
e8d91a0375 Release v0.18.1 2019-07-30 13:22:51 +03:00
Stefan Prodan
fae9aa664d Merge pull request #261 from weaveworks/blue-green-e2e
Fix Blue/Green metrics provider and add e2e tests
2019-07-30 13:16:20 +03:00
stefanprodan
c31e9e5a96 Use Linkerd metrics for ingress and kubernetes routers 2019-07-30 13:00:28 +03:00
stefanprodan
99fff98274 Kustomize: set Flagger log level to info 2019-07-30 12:43:02 +03:00
stefanprodan
11d84bf35d Enable kubernetes metric provider 2019-07-30 12:27:53 +03:00
stefanprodan
e56ba480c7 Add Blue/Green e2e tests 2019-07-30 12:02:15 +03:00
25 changed files with 243 additions and 34 deletions

View File

@@ -78,6 +78,17 @@ jobs:
- run: test/e2e-istio.sh
- run: test/e2e-tests.sh
e2e-kubernetes-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-kubernetes.sh
- run: test/e2e-kubernetes-tests.sh
e2e-smi-istio-testing:
machine: true
steps:
@@ -133,6 +144,47 @@ jobs:
- run: test/e2e-linkerd.sh
- run: test/e2e-linkerd-tests.sh
push-helm-charts:
docker:
- image: circleci/golang:1.12
steps:
- checkout
- run:
name: Install kubectl
command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
- run:
name: Install helm
command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
- run:
name: Initialize helm
command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
- run:
name: Lint charts
command: |
helm lint ./charts/*
- run:
name: Package charts
command: |
mkdir $HOME/charts
helm package ./charts/* --destination $HOME/charts
- run:
name: Publish charts
command: |
if echo "${CIRCLE_TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
git config user.email weaveworksbot@users.noreply.github.com
git config user.name weaveworksbot
git remote set-url origin ${REPOSITORY}
git checkout gh-pages
mv -f $HOME/charts/*.tgz .
helm repo index . --url https://flagger.app
git add .
git commit -m "Publish Helm charts v${CIRCLE_TAG}"
git push origin gh-pages
else
echo "Not a release! Skip charts publish"
fi
workflows:
version: 2
build-test-push:
@@ -145,7 +197,7 @@ workflows:
- e2e-istio-testing:
requires:
- build-binary
- e2e-smi-istio-testing:
- e2e-kubernetes-testing:
requires:
- build-binary
# - e2e-supergloo-testing:
@@ -164,7 +216,7 @@ workflows:
requires:
- build-binary
- e2e-istio-testing
- e2e-smi-istio-testing
- e2e-kubernetes-testing
#- e2e-supergloo-testing
- e2e-gloo-testing
- e2e-nginx-testing
@@ -187,6 +239,14 @@ workflows:
tags:
ignore: /^chart.*/
- push-binary:
requires:
- push-container
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-helm-charts:
requires:
- push-container
filters:

View File

@@ -2,6 +2,26 @@
All notable changes to this project are documented in this file.
## 0.18.2 (2019-08-05)
Fixes multi-port support for Istio
#### Fixes
- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)
#### Improvements
- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)
## 0.18.1 (2019-07-30)
Fixes Blue/Green style deployments for Kubernetes and Linkerd providers
#### Fixes
- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)
## 0.18.0 (2019-07-29)
Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis
@@ -18,8 +38,8 @@ Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-ga
#### Breaking changes
- due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
- upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
- Upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
## 0.17.0 (2019-07-08)

View File

@@ -63,8 +63,9 @@ test: test-fmt test-codegen
helm-package:
cd charts/ && helm package ./*
mv charts/*.tgz docs/
helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
mv charts/*.tgz bin/
curl -s https://raw.githubusercontent.com/weaveworks/flagger/gh-pages/index.yaml > ./bin/index.yaml
helm repo index bin --url https://flagger.app --merge ./bin/index.yaml
helm-up:
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.18.0
image: weaveworks/flagger:0.18.2
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
version: 0.18.0
appVersion: 0.18.0
version: 0.18.2
appVersion: 0.18.2
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.

View File

@@ -32,6 +32,10 @@ spec:
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
topologyKey: kubernetes.io/hostname
{{- if .Values.image.pullSecret }}
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
{{- end }}
containers:
- name: flagger
securityContext:

View File

@@ -2,8 +2,9 @@
image:
repository: weaveworks/flagger
tag: 0.18.0
tag: 0.18.2
pullPolicy: IfNotPresent
pullSecret:
metricsServer: "http://prometheus:9090"
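A hedged usage sketch for the new value (the secret name `regcred` is hypothetical, not part of this change): point `image.pullSecret` at an existing docker-registry secret in the release namespace and the chart renders it into the `imagePullSecrets` block shown in the template above.

```yaml
image:
  repository: weaveworks/flagger
  tag: 0.18.2
  pullPolicy: IfNotPresent
  pullSecret: regcred        # kubernetes.io/dockerconfigjson secret (assumed name)
```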

View File

@@ -232,8 +232,7 @@ spec:
mode: ISTIO_MUTUAL
```
Both port `8080` and `9090` will be added to the ClusterIP services but the virtual service
will point to the port specified in `spec.service.port`.
Both port `8080` and `9090` will be added to the ClusterIP services.
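As a rough illustration (the service name, selector and port names are assumptions, not taken from the FAQ), a generated ClusterIP service would then expose both ports:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: podinfo-canary       # Flagger also generates podinfo and podinfo-primary
spec:
  type: ClusterIP
  selector:
    app: podinfo
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  - name: http-metrics
    port: 9090
    targetPort: 9090
```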
### Label selectors

View File

@@ -37,8 +37,10 @@ Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set meshName=global.appmesh-system \
--set "backends[0]=podinfo.test"
--set meshName=global \
--set "backends[0]=podinfo.test" \
--set "backends[1]=podinfo-canary.test" \
--set "backends[2]=podinfo-primary.test"
```
Create a canary custom resource:
@@ -67,7 +69,7 @@ spec:
# container port
port: 9898
# App Mesh reference
meshName: global.appmesh-system
meshName: global
# App Mesh egress (optional)
backends:
- backend.test

View File

@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 0.18.0
newTag: 0.18.2

View File

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=istio
- -metrics-server=http://prometheus:9090
- -slack-user=flagger

View File

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=kubernetes
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger

View File

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=linkerd
- -metrics-server=http://linkerd-prometheus:9090
- -slack-user=flagger

View File

@@ -614,8 +614,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
if r.Spec.Provider != "" {
metricsProvider = r.Spec.Provider
// set the metrics provider to Linkerd Prometheus when using NGINX as Linkerd Ingress
if r.Spec.Provider == "nginx" && strings.Contains(c.meshProvider, "linkerd") {
// set the metrics server to Linkerd Prometheus when Linkerd is the default mesh provider
if strings.Contains(c.meshProvider, "linkerd") {
metricsProvider = "linkerd"
}
}

View File

@@ -28,6 +28,10 @@ func (factory Factory) Observer(provider string) Interface {
return &HttpObserver{
client: factory.Client,
}
case provider == "kubernetes":
return &HttpObserver{
client: factory.Client,
}
case provider == "appmesh":
return &EnvoyObserver{
client: factory.Client,

View File

@@ -2,6 +2,7 @@ package router
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
@@ -9,7 +10,6 @@ import (
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
@@ -236,7 +236,7 @@ func (ir *IstioRouter) GetRoutes(canary *flaggerv1.Canary) (
) {
targetName := canary.Spec.TargetRef.Name
vs := &istiov1alpha3.VirtualService{}
vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
err = fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -283,7 +283,7 @@ func (ir *IstioRouter) SetRoutes(
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryName := fmt.Sprintf("%s-canary", targetName)
vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -383,12 +383,5 @@ func makeDestination(canary *flaggerv1.Canary, host string, weight int) istiov1a
Weight: weight,
}
// if port discovery is enabled then we need to explicitly set the destination port
if canary.Spec.Service.PortDiscovery {
dest.Destination.Port = &istiov1alpha3.PortSelector{
Number: uint32(canary.Spec.Service.Port),
}
}
return dest
}

View File

@@ -1,4 +1,4 @@
package version
var VERSION = "0.18.0"
var VERSION = "0.18.2"
var REVISION = "unknown"

View File

@@ -2,7 +2,7 @@
set -o errexit
GLOO_VER="0.17.6"
GLOO_VER="0.18.8"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
@@ -12,7 +12,7 @@ helm upgrade -i gloo gloo/gloo --version ${GLOO_VER} \
--namespace gloo-system
kubectl -n gloo-system rollout status deployment/gloo
kubectl -n gloo-system rollout status deployment/gateway-proxy
kubectl -n gloo-system rollout status deployment/gateway-proxy-v2
kubectl -n gloo-system get all
echo '>>> Installing Flagger'

View File

@@ -2,7 +2,7 @@
set -o errexit
ISTIO_VER="1.2.2"
ISTIO_VER="1.2.3"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

test/e2e-kubernetes-tests.sh (new executable file, 98 lines added)
View File

@@ -0,0 +1,98 @@
#!/usr/bin/env bash
# This script runs e2e tests for Blue/Green initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Kustomize
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Creating test namespace'
kubectl create namespace test
echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: kubernetes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    portDiscovery: true
  canaryAnalysis:
    interval: 15s
    threshold: 10
    iterations: 5
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: request-duration
      threshold: 500
      interval: 30s
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 10m -q 10 -c 2 http://podinfo-canary.test:9898/"
        logCmdOutput: "true"
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n flagger-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.7.0
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test describe deployment/podinfo-primary | grep '1.7.0' && ok=true || ok=false
sleep 10
kubectl -n flagger-system logs deployment/flagger --tail 1
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n test describe deployment/podinfo
kubectl -n test describe deployment/podinfo-primary
kubectl -n flagger-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary promotion test passed'
kubectl -n flagger-system logs deployment/flagger

test/e2e-kubernetes.sh (new executable file, 17 lines added)
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Loading Flagger image'
kind load docker-image test/flagger:latest
echo '>>> Installing Flagger'
kubectl apply -k ${REPO_ROOT}/kustomize/kubernetes
kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n flagger-system rollout status deployment/flagger
kubectl -n flagger-system rollout status deployment/flagger-prometheus

View File

@@ -4,7 +4,7 @@ set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
NGINX_VERSION=1.8.2
NGINX_VERSION=1.12.1
echo '>>> Installing NGINX Ingress'
helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \

View File

@@ -33,6 +33,7 @@ spec:
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
@@ -142,6 +143,7 @@ spec:
name: podinfo
progressDeadlineSeconds: 60
service:
portDiscovery: true
port: 9898
canaryAnalysis:
interval: 10s
@@ -202,4 +204,4 @@ echo '✔ A/B testing promotion test passed'
kubectl -n istio-system logs deployment/flagger
echo '✔ All tests passed'
echo '✔ All tests passed'

View File

@@ -20,6 +20,7 @@ spec:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9797"
labels:
app: podinfo
spec:
@@ -31,9 +32,13 @@ spec:
- containerPort: 9898
name: http
protocol: TCP
- containerPort: 9797
name: http-prom
protocol: TCP
command:
- ./podinfo
- --port=9898
- --port-metrics=9797
- --level=info
- --random-delay=false
- --random-error=false