Compare commits

...

43 Commits

Author SHA1 Message Date
stefanprodan
bb620ad94a Release v0.14.0 changelog 2019-05-21 13:54:54 +02:00
stefanprodan
7c6d1c48a3 Release v0.14.0 2019-05-21 13:54:15 +02:00
Stefan Prodan
bd5d884c8b Merge pull request #187 from weaveworks/docs-smi
Flagger docs SMI
2019-05-21 13:34:19 +02:00
Stefan Prodan
1c06721c9a Merge pull request #185 from weaveworks/docs-gloo
Add Gloo ingress controller docs
2019-05-21 13:24:19 +02:00
stefanprodan
1e29e2c4eb Fix Grafana Prometheus URL 2019-05-19 10:34:36 +03:00
stefanprodan
88c39d7379 Add Gloo canary deployment docs and diagram 2019-05-17 15:07:43 +03:00
stefanprodan
da43a152ba Add Gloo canary deployment example 2019-05-17 13:15:53 +03:00
stefanprodan
ec63aa9999 Add Gloo custom resources to RBAC 2019-05-17 11:55:15 +03:00
Stefan Prodan
7b9df746ad Merge pull request #179 from yuval-k/gloo2
Add support for Gloo
2019-05-17 11:17:27 +03:00
Yuval Kohavi
52d93ddda2 fix router tests 2019-05-16 13:08:53 -04:00
Yuval Kohavi
eb0331f2bf fix tests 2019-05-16 12:48:03 -04:00
Yuval Kohavi
6a66a87a44 PR updates 2019-05-16 07:28:22 -04:00
stefanprodan
f3cc810948 Update Flagger image tag (fix latency check) 2019-05-15 20:31:25 +03:00
Stefan Prodan
12d84b2e24 Merge pull request #183 from weaveworks/metrics-fix
Fix Istio latency check
2019-05-15 20:24:15 +03:00
stefanprodan
58bde24ece Fix Istio request duration test 2019-05-15 20:10:27 +03:00
stefanprodan
5b3fd0efca Set Istio request duration to milliseconds 2019-05-15 20:01:27 +03:00
stefanprodan
ee6e39afa6 Add SMI tutorial 2019-05-15 17:37:29 +03:00
Yuval Kohavi
677b9d9197 gloo metrics 2019-05-14 17:48:13 -04:00
Yuval Kohavi
786c5aa93a Merge remote-tracking branch 'upstream/master' into gloo2 2019-05-14 10:26:57 -04:00
Stefan Prodan
fd44f1fabf Merge pull request #182 from weaveworks/linkerd-metrics
Fix Linkerd promql queries
2019-05-14 15:23:37 +03:00
Stefan Prodan
b20e0178e1 Merge pull request #180 from weaveworks/smi
Add support for SMI
2019-05-14 13:24:29 +03:00
stefanprodan
5a490abfdd Remove the mesh gateway from docs examples 2019-05-14 13:06:52 +03:00
stefanprodan
674c79da94 Fix Linkerd promql queries
- include all inbound traffic stats
2019-05-14 12:14:47 +03:00
stefanprodan
23ebb4235d merge metrics-v2 into smi 2019-05-14 09:53:42 +03:00
Stefan Prodan
b2500d0ccb Merge pull request #181 from weaveworks/metrics-v2
Refactor the metrics package
2019-05-14 09:49:24 +03:00
stefanprodan
ee500d83ac Add Linkerd observer implementation 2019-05-13 17:51:39 +03:00
stefanprodan
0032c14a78 Refactor metrics
- add observer interface with builtin metrics functions
- add metrics observer factory
- add prometheus client
- implement the observer interface for istio, envoy and nginx
- remove deprecated istio and app mesh metric aliases (istio_requests_total, istio_request_duration_seconds_bucket, envoy_cluster_upstream_rq, envoy_cluster_upstream_rq_time_bucket)
2019-05-13 17:34:08 +03:00
stefanprodan
8fd3e927b8 Merge branch 'master' into smi 2019-05-12 13:58:37 +03:00
stefanprodan
7fe273a21d Fix SMI cluster role binding 2019-05-11 14:08:58 +03:00
stefanprodan
bd817cc520 Run SMI Istio e2e tests 2019-05-11 14:00:53 +03:00
stefanprodan
eb856fda13 Add SMI Istio e2e tests 2019-05-11 13:46:24 +03:00
stefanprodan
d63f05c92e Add SMI group to RBAC 2019-05-11 13:45:32 +03:00
stefanprodan
8fde6bdb8a Add SMI Istio adapter deployment 2019-05-11 13:35:36 +03:00
stefanprodan
8148120421 Enable Istio checks for SMI-Istio adapter 2019-05-11 13:06:06 +03:00
stefanprodan
95b8840bf2 Add SMI traffic split to router 2019-05-11 13:05:19 +03:00
stefanprodan
0e8b1ef20f Generate the SMI TrafficSplit clientset 2019-05-11 12:49:23 +03:00
Yuval Kohavi
0fbf4dcdb2 add canary promotion 2019-05-10 20:16:21 -04:00
Yuval Kohavi
7aca9468ac re-enable helm 2019-05-10 19:48:22 -04:00
Yuval Kohavi
a6c0f08fcc add gloo to circle 2019-05-10 19:44:46 -04:00
Yuval Kohavi
9c1bcc08bb float -> percent 2019-05-10 19:21:08 -04:00
Yuval Kohavi
87e9dfe3d3 e2e test 2019-05-10 19:16:16 -04:00
Yuval Kohavi
d7be66743e Merge remote-tracking branch 'upstream/master' into gloo2 2019-05-10 10:38:14 -04:00
Yuval Kohavi
350efb2bfe gloo upstream group support 2019-04-23 07:47:50 -04:00
74 changed files with 3916 additions and 869 deletions

View File

@@ -9,6 +9,15 @@ jobs:
- run: test/e2e-build.sh
- run: test/e2e-tests.sh
e2e-smi-istio-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-istio.sh
- run: test/e2e-smi-istio-build.sh
- run: test/e2e-tests.sh canary
e2e-supergloo-testing:
machine: true
steps:
@@ -18,6 +27,15 @@ jobs:
- run: test/e2e-build.sh supergloo:test.supergloo-system
- run: test/e2e-tests.sh canary
e2e-gloo-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-gloo.sh
- run: test/e2e-gloo-build.sh
- run: test/e2e-gloo-tests.sh
e2e-nginx-testing:
machine: true
steps:
@@ -38,6 +56,13 @@ workflows:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-smi-istio-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-supergloo-testing:
filters:
branches:
@@ -46,6 +71,13 @@ workflows:
- /docs-.*/
- /release-.*/
- e2e-nginx-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-gloo-testing:
filters:
branches:
ignore:

View File

@@ -2,6 +2,15 @@
All notable changes to this project are documented in this file.
## 0.14.0 (2019-05-21)
Adds support for Service Mesh Interface and [Gloo](https://docs.flagger.app/usage/gloo-progressive-delivery) ingress controller
#### Features
- Add support for SMI (Istio weighted traffic) [#180](https://github.com/weaveworks/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/weaveworks/flagger/pull/179)
## 0.13.2 (2019-04-11)
Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)

View File

@@ -24,6 +24,18 @@ run-nginx:
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-smi:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:istio -namespace=smi \
-metrics-server=https://prometheus.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-gloo:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=gloo -namespace=gloo \
-metrics-server=https://prometheus.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
build:
docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile

View File

@@ -40,6 +40,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
* [Gloo Canary Deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
* Tutorials
@@ -82,7 +83,6 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
@@ -93,17 +93,12 @@ spec:
# HTTP rewrite (optional)
rewrite:
uri: /
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
# request timeout (optional)
timeout: 5s
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs

View File

@@ -59,6 +59,26 @@ rules:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- split.smi-spec.io
resources:
- trafficsplits
verbs: ["*"]
- apiGroups:
- gloo.solo.io
resources:
- settings
- upstreams
- upstreamgroups
- proxies
- virtualservices
verbs: ["*"]
- apiGroups:
- gateway.solo.io
resources:
- virtualservices
- gateways
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.13.2
image: weaveworks/flagger:0.14.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -0,0 +1,36 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
port: 9898
canaryAnalysis:
interval: 10s
threshold: 10
maxWeight: 50
stepWeight: 5
metrics:
- name: request-success-rate
threshold: 99
interval: 1m
- name: request-duration
threshold: 500
interval: 30s
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://gloo.example.com/"

View File

@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

19
artifacts/gloo/hpa.yaml Normal file
View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 1
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -0,0 +1,17 @@
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
virtualHost:
domains:
- '*'
name: podinfo.default
routes:
- matcher:
prefix: /
routeAction:
upstreamGroup:
name: podinfo
namespace: gloo

View File

@@ -0,0 +1,131 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: trafficsplits.split.smi-spec.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.service
description: The service
name: Service
type: string
group: split.smi-spec.io
names:
kind: TrafficSplit
listKind: TrafficSplitList
plural: trafficsplits
singular: trafficsplit
scope: Namespaced
subresources:
status: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: smi-adapter-istio
namespace: istio-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: smi-adapter-istio
rules:
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- apps
resourceNames:
- smi-adapter-istio
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- split.smi-spec.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: smi-adapter-istio
subjects:
- kind: ServiceAccount
name: smi-adapter-istio
namespace: istio-system
roleRef:
kind: ClusterRole
name: smi-adapter-istio
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: smi-adapter-istio
namespace: istio-system
spec:
replicas: 1
selector:
matchLabels:
name: smi-adapter-istio
template:
metadata:
labels:
name: smi-adapter-istio
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccountName: smi-adapter-istio
containers:
- name: smi-adapter-istio
image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
command:
- smi-adapter-istio
imagePullPolicy: Always
env:
- name: WATCH_NAMESPACE
value: ""
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "smi-adapter-istio"

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
version: 0.13.2
appVersion: 0.13.2
version: 0.14.0
appVersion: 0.14.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.

View File

@@ -55,6 +55,26 @@ rules:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- split.smi-spec.io
resources:
- trafficsplits
verbs: ["*"]
- apiGroups:
- gloo.solo.io
resources:
- settings
- upstreams
- upstreamgroups
- proxies
- virtualservices
verbs: ["*"]
- apiGroups:
- gateway.solo.io
resources:
- virtualservices
- gateways
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:

View File

@@ -2,7 +2,7 @@
image:
repository: weaveworks/flagger
tag: 0.13.2
tag: 0.14.0
pullPolicy: IfNotPresent
metricsServer: "http://prometheus:9090"

View File

@@ -45,8 +45,8 @@ var (
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "8080", "Port to listen on.")
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
@@ -55,9 +55,9 @@ func init() {
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio or appmesh")
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, appmesh, supergloo, nginx or smi.")
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
}
func main() {
@@ -87,12 +87,12 @@ func main() {
meshClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building istio clientset: %v", err)
logger.Fatalf("Error building mesh clientset: %v", err)
}
flaggerClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building example clientset: %s", err.Error())
logger.Fatalf("Error building flagger clientset: %s", err.Error())
}
flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
@@ -116,7 +116,12 @@ func main() {
logger.Infof("Watching namespace %s", namespace)
}
ok, err := metrics.CheckMetricsServer(metricsServer)
observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
if err != nil {
logger.Fatalf("Error building prometheus client: %s", err.Error())
}
ok, err := observerFactory.Client.IsOnline()
if ok {
logger.Infof("Connected to metrics server %s", metricsServer)
} else {
@@ -148,6 +153,7 @@ func main() {
logger,
slack,
routerFactory,
observerFactory,
meshProvider,
version.VERSION,
labels,

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

View File

@@ -16,10 +16,12 @@
* [Istio A/B Testing](usage/ab-testing.md)
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Gloo Canary Deployments](usage/gloo-progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)
## Tutorials
* [SMI Istio Canary Deployments](tutorials/flagger-smi-istio.md)
* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)

View File

@@ -38,7 +38,6 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com

View File

@@ -163,7 +163,7 @@ Deploy Grafana in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://prometheus.appmesh-system:9090
--set url=http://flagger-prometheus.appmesh-system:9090
```
You can access Grafana using port forwarding:

View File

@@ -0,0 +1,332 @@
# Flagger SMI
This guide shows you how to use the SMI Istio adapter and Flagger to automate canary deployments.
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.
A minimal Istio installation should contain the following services:
* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus
### Install Istio and the SMI adapter
Add Istio Helm repository:
```bash
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
```
Install Istio CRDs:
```bash
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
```
Install Istio:
```bash
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
```
Create a generic Istio gateway to expose services outside the mesh on HTTP:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
```
Save the above resource as public-gateway.yaml and then apply it:
```bash
kubectl apply -f ./public-gateway.yaml
```
Find the Gateway load balancer IP and add a DNS record for it:
```bash
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```
Install the SMI adapter:
```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
```
### Install Flagger and Grafana
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set image.tag=master-12d84b2 \
--set meshProvider=smi:istio
```
Flagger comes with a Grafana dashboard made for monitoring the canary deployments.
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
### Workloads bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n istio-system logs deployment/flagger -f | jq .msg
New revision detected podinfo.test
Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana. The Istio dashboard URL is
http://localhost:3000/d/flagger-istio/istio-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-16T14:05:07Z
prod frontend Succeeded 0 2019-05-15T16:15:07Z
prod backend Failed 0 2019-05-14T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

View File

@@ -60,7 +60,6 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.example.com

View File

@@ -0,0 +1,366 @@
# Gloo Ingress Controller Canary Deployments
This guide shows you how to use the [Gloo](https://gloo.solo.io/) ingress controller and Flagger to automate canary deployments.
![Flagger Gloo Ingress Controller](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gloo-overview.png)
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Gloo ingress **0.13.29** or newer.
Install Gloo with Helm:
```bash
helm repo add gloo https://storage.googleapis.com/solo-public-helm
helm upgrade -i gloo gloo/gloo \
--namespace gloo-system
```
Install Flagger and the Prometheus add-on in the same namespace as Gloo:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace gloo-system \
--set prometheus.install=true \
--set meshProvider=gloo
```
Optionally you can enable Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace gloo-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Gloo upstream groups).
These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment and a horizontal pod autoscaler:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gloo/deployment.yaml
kubectl apply -f ${REPO}/artifacts/gloo/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create a virtual service definition that references an upstream group that will be generated by Flagger
(replace `app.example.com` with your own domain):
```yaml
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
virtualHost:
domains:
- 'app.example.com'
name: podinfo.test
routes:
- matcher:
prefix: /
routeAction:
upstreamGroup:
name: podinfo
namespace: test
```
Save the above resource as podinfo-virtualservice.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-virtualservice.yaml
```
Create a canary custom resource (replace `app.example.com` with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Gloo Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# load testing (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
virtualservices.gateway.solo.io/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
upstreamgroups.gloo.solo.io/podinfo
```
When the bootstrap finishes Flagger will set the canary status to initialized:
```bash
kubectl -n test get canary podinfo
NAME STATUS WEIGHT LASTTRANSITIONTIME
podinfo Initialized 0 2019-05-17T08:09:51Z
```
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-17T14:05:07Z
prod frontend Succeeded 0 2019-05-17T16:15:07Z
prod backend Failed 0 2019-05-17T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```
Generate HTTP 500 errors:
```bash
watch curl http://app.example.com/status/500
```
Generate high latency:
```bash
watch curl http://app.example.com/delay/2
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Custom metrics
The canary analysis can be extended with Prometheus queries.
The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
histogram to validate the canary.
Edit the canary analysis and add the following metric:
```yaml
canaryAnalysis:
metrics:
- name: "404s percentage"
threshold: 5
query: |
100 - sum(
rate(
http_request_duration_seconds_count{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
status!="404"
}[1m]
)
)
/
sum(
rate(
http_request_duration_seconds_count{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) * 100
```
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5
percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
```
Generate 404s:
```bash
watch curl http://app.example.com/status/404
```
Watch Flagger logs:
```
kubectl -n gloo-system logs deployment/flagger -f | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement 404s percentage 6.20 > 5
Halt podinfo.test advancement 404s percentage 6.45 > 5
Halt podinfo.test advancement 404s percentage 7.60 > 5
Halt podinfo.test advancement 404s percentage 8.69 > 5
Halt podinfo.test advancement 404s percentage 9.70 > 5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.

View File

@@ -54,7 +54,6 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.example.com

View File

@@ -23,5 +23,5 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3" \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

5
pkg/apis/smi/register.go Normal file
View File

@@ -0,0 +1,5 @@
package smi
const (
GroupName = "split.smi-spec.io"
)

View File

@@ -0,0 +1,4 @@
// +k8s:deepcopy-gen=package
// +groupName=split.smi-spec.io
package v1alpha1

View File

@@ -0,0 +1,48 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
ts "github.com/weaveworks/flagger/pkg/apis/smi"
)
// SchemeGroupVersion is the identifier for the API which includes
// the name of the group and the version of the API
var SchemeGroupVersion = schema.GroupVersion{
Group: ts.GroupName,
Version: "v1alpha1",
}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder collects functions that add things to a scheme. It's to allow
// code to compile without explicitly referencing generated types. You should
// declare one in each package that will have generated deep copy or conversion
// functions.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies all the stored functions to the scheme. A non-nil error
// indicates that one function failed and the attempt was abandoned.
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&TrafficSplit{},
&TrafficSplitList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

View File

@@ -0,0 +1,56 @@
package v1alpha1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// TrafficSplit allows users to incrementally direct percentages of traffic
// between various services. It will be used by clients such as ingress
// controllers or service mesh sidecars to split the outgoing traffic to
// different destinations.
type TrafficSplit struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the traffic split.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec TrafficSplitSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the pod.
// This data may not be up to date.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
//Status Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// TrafficSplitSpec is the specification for a TrafficSplit
type TrafficSplitSpec struct {
Service string `json:"service,omitempty"`
Backends []TrafficSplitBackend `json:"backends,omitempty"`
}
// TrafficSplitBackend defines a backend
type TrafficSplitBackend struct {
Service string `json:"service,omitempty"`
Weight *resource.Quantity `json:"weight,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TrafficSplitList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []TrafficSplit `json:"items"`
}

View File

@@ -0,0 +1,129 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSplit) DeepCopyInto(out *TrafficSplit) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSplit.
func (in *TrafficSplit) DeepCopy() *TrafficSplit {
if in == nil {
return nil
}
out := new(TrafficSplit)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrafficSplit) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSplitBackend) DeepCopyInto(out *TrafficSplitBackend) {
*out = *in
if in.Weight != nil {
in, out := &in.Weight, &out.Weight
x := (*in).DeepCopy()
*out = &x
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSplitBackend.
func (in *TrafficSplitBackend) DeepCopy() *TrafficSplitBackend {
if in == nil {
return nil
}
out := new(TrafficSplitBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSplitList) DeepCopyInto(out *TrafficSplitList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]TrafficSplit, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSplitList.
func (in *TrafficSplitList) DeepCopy() *TrafficSplitList {
if in == nil {
return nil
}
out := new(TrafficSplitList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrafficSplitList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrafficSplitSpec) DeepCopyInto(out *TrafficSplitSpec) {
*out = *in
if in.Backends != nil {
in, out := &in.Backends, &out.Backends
*out = make([]TrafficSplitBackend, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficSplitSpec.
func (in *TrafficSplitSpec) DeepCopy() *TrafficSplitSpec {
if in == nil {
return nil
}
out := new(TrafficSplitSpec)
in.DeepCopyInto(out)
return out
}

View File

@@ -22,6 +22,7 @@ import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -38,6 +39,9 @@ type Interface interface {
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
// Deprecated: please explicitly pick a version if possible.
Networking() networkingv1alpha3.NetworkingV1alpha3Interface
SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Split() splitv1alpha1.SplitV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
@@ -47,6 +51,7 @@ type Clientset struct {
appmeshV1beta1 *appmeshv1beta1.AppmeshV1beta1Client
flaggerV1alpha3 *flaggerv1alpha3.FlaggerV1alpha3Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
splitV1alpha1 *splitv1alpha1.SplitV1alpha1Client
}
// AppmeshV1beta1 retrieves the AppmeshV1beta1Client
@@ -82,6 +87,17 @@ func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface
return c.networkingV1alpha3
}
// SplitV1alpha1 retrieves the SplitV1alpha1Client
func (c *Clientset) SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface {
return c.splitV1alpha1
}
// Deprecated: Split retrieves the default version of SplitClient.
// Please explicitly pick a version.
func (c *Clientset) Split() splitv1alpha1.SplitV1alpha1Interface {
return c.splitV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
@@ -110,6 +126,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
if err != nil {
return nil, err
}
cs.splitV1alpha1, err = splitv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
@@ -125,6 +145,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
cs.appmeshV1beta1 = appmeshv1beta1.NewForConfigOrDie(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.NewForConfigOrDie(c)
cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
cs.splitV1alpha1 = splitv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -136,6 +157,7 @@ func New(c rest.Interface) *Clientset {
cs.appmeshV1beta1 = appmeshv1beta1.New(c)
cs.flaggerV1alpha3 = flaggerv1alpha3.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.splitV1alpha1 = splitv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs

View File

@@ -26,6 +26,8 @@ import (
fakeflaggerv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/flagger/v1alpha3/fake"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
fakenetworkingv1alpha3 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
fakesplitv1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -104,3 +106,13 @@ func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3In
func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
}
// SplitV1alpha1 retrieves the SplitV1alpha1Client
func (c *Clientset) SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface {
return &fakesplitv1alpha1.FakeSplitV1alpha1{Fake: &c.Fake}
}
// Split retrieves the SplitV1alpha1Client
func (c *Clientset) Split() splitv1alpha1.SplitV1alpha1Interface {
return &fakesplitv1alpha1.FakeSplitV1alpha1{Fake: &c.Fake}
}

View File

@@ -22,6 +22,7 @@ import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -36,6 +37,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
appmeshv1beta1.AddToScheme,
flaggerv1alpha3.AddToScheme,
networkingv1alpha3.AddToScheme,
splitv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition

View File

@@ -22,6 +22,7 @@ import (
appmeshv1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
flaggerv1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
networkingv1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
splitv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -36,6 +37,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
appmeshv1beta1.AddToScheme,
flaggerv1alpha3.AddToScheme,
networkingv1alpha3.AddToScheme,
splitv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1

View File

@@ -0,0 +1,20 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,40 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/weaveworks/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeSplitV1alpha1 struct {
*testing.Fake
}
func (c *FakeSplitV1alpha1) TrafficSplits(namespace string) v1alpha1.TrafficSplitInterface {
return &FakeTrafficSplits{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeSplitV1alpha1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -0,0 +1,128 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeTrafficSplits implements TrafficSplitInterface
type FakeTrafficSplits struct {
Fake *FakeSplitV1alpha1
ns string
}
var trafficsplitsResource = schema.GroupVersionResource{Group: "split.smi-spec.io", Version: "v1alpha1", Resource: "trafficsplits"}
var trafficsplitsKind = schema.GroupVersionKind{Group: "split.smi-spec.io", Version: "v1alpha1", Kind: "TrafficSplit"}
// Get takes name of the trafficSplit, and returns the corresponding trafficSplit object, and an error if there is any.
func (c *FakeTrafficSplits) Get(name string, options v1.GetOptions) (result *v1alpha1.TrafficSplit, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(trafficsplitsResource, c.ns, name), &v1alpha1.TrafficSplit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.TrafficSplit), err
}
// List takes label and field selectors, and returns the list of TrafficSplits that match those selectors.
func (c *FakeTrafficSplits) List(opts v1.ListOptions) (result *v1alpha1.TrafficSplitList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(trafficsplitsResource, trafficsplitsKind, c.ns, opts), &v1alpha1.TrafficSplitList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.TrafficSplitList{ListMeta: obj.(*v1alpha1.TrafficSplitList).ListMeta}
for _, item := range obj.(*v1alpha1.TrafficSplitList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested trafficSplits.
func (c *FakeTrafficSplits) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(trafficsplitsResource, c.ns, opts))
}
// Create takes the representation of a trafficSplit and creates it. Returns the server's representation of the trafficSplit, and an error, if there is any.
func (c *FakeTrafficSplits) Create(trafficSplit *v1alpha1.TrafficSplit) (result *v1alpha1.TrafficSplit, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(trafficsplitsResource, c.ns, trafficSplit), &v1alpha1.TrafficSplit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.TrafficSplit), err
}
// Update takes the representation of a trafficSplit and updates it. Returns the server's representation of the trafficSplit, and an error, if there is any.
func (c *FakeTrafficSplits) Update(trafficSplit *v1alpha1.TrafficSplit) (result *v1alpha1.TrafficSplit, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(trafficsplitsResource, c.ns, trafficSplit), &v1alpha1.TrafficSplit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.TrafficSplit), err
}
// Delete takes name of the trafficSplit and deletes it. Returns an error if one occurs.
func (c *FakeTrafficSplits) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(trafficsplitsResource, c.ns, name), &v1alpha1.TrafficSplit{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeTrafficSplits) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(trafficsplitsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha1.TrafficSplitList{})
return err
}
// Patch applies the patch and returns the patched trafficSplit.
func (c *FakeTrafficSplits) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TrafficSplit, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(trafficsplitsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TrafficSplit{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.TrafficSplit), err
}

View File

@@ -0,0 +1,21 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type TrafficSplitExpansion interface{}

View File

@@ -0,0 +1,90 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
"github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type SplitV1alpha1Interface interface {
RESTClient() rest.Interface
TrafficSplitsGetter
}
// SplitV1alpha1Client is used to interact with features provided by the split.smi-spec.io group.
type SplitV1alpha1Client struct {
restClient rest.Interface
}
func (c *SplitV1alpha1Client) TrafficSplits(namespace string) TrafficSplitInterface {
return newTrafficSplits(c, namespace)
}
// NewForConfig creates a new SplitV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*SplitV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &SplitV1alpha1Client{client}, nil
}
// NewForConfigOrDie creates a new SplitV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *SplitV1alpha1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new SplitV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *SplitV1alpha1Client {
return &SplitV1alpha1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *SplitV1alpha1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}

View File

@@ -0,0 +1,174 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"time"
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// TrafficSplitsGetter has a method to return a TrafficSplitInterface.
// A group's client should implement this interface.
type TrafficSplitsGetter interface {
TrafficSplits(namespace string) TrafficSplitInterface
}
// TrafficSplitInterface has methods to work with TrafficSplit resources.
type TrafficSplitInterface interface {
Create(*v1alpha1.TrafficSplit) (*v1alpha1.TrafficSplit, error)
Update(*v1alpha1.TrafficSplit) (*v1alpha1.TrafficSplit, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.TrafficSplit, error)
List(opts v1.ListOptions) (*v1alpha1.TrafficSplitList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TrafficSplit, err error)
TrafficSplitExpansion
}
// trafficSplits implements TrafficSplitInterface
type trafficSplits struct {
client rest.Interface
ns string
}
// newTrafficSplits returns a TrafficSplits
// client scoped to the given namespace, sharing the group client's REST transport.
func newTrafficSplits(c *SplitV1alpha1Client, namespace string) *trafficSplits {
return &trafficSplits{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the trafficSplit, and returns the corresponding trafficSplit object, and an error if there is any.
func (c *trafficSplits) Get(name string, options v1.GetOptions) (result *v1alpha1.TrafficSplit, err error) {
result = &v1alpha1.TrafficSplit{}
err = c.client.Get().
Namespace(c.ns).
Resource("trafficsplits").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of TrafficSplits that match those selectors.
func (c *trafficSplits) List(opts v1.ListOptions) (result *v1alpha1.TrafficSplitList, err error) {
// Honor the caller-provided server-side timeout, if any.
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.TrafficSplitList{}
err = c.client.Get().
Namespace(c.ns).
Resource("trafficsplits").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested trafficSplits.
func (c *trafficSplits) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
// Force watch semantics regardless of what the caller set.
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("trafficsplits").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a trafficSplit and creates it. Returns the server's representation of the trafficSplit, and an error, if there is any.
func (c *trafficSplits) Create(trafficSplit *v1alpha1.TrafficSplit) (result *v1alpha1.TrafficSplit, err error) {
result = &v1alpha1.TrafficSplit{}
err = c.client.Post().
Namespace(c.ns).
Resource("trafficsplits").
Body(trafficSplit).
Do().
Into(result)
return
}
// Update takes the representation of a trafficSplit and updates it. Returns the server's representation of the trafficSplit, and an error, if there is any.
func (c *trafficSplits) Update(trafficSplit *v1alpha1.TrafficSplit) (result *v1alpha1.TrafficSplit, err error) {
result = &v1alpha1.TrafficSplit{}
err = c.client.Put().
Namespace(c.ns).
Resource("trafficsplits").
Name(trafficSplit.Name).
Body(trafficSplit).
Do().
Into(result)
return
}
// Delete takes name of the trafficSplit and deletes it. Returns an error if one occurs.
func (c *trafficSplits) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("trafficsplits").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
// listOptions selects which objects to delete; options controls deletion behavior.
func (c *trafficSplits) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("trafficsplits").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched trafficSplit.
func (c *trafficSplits) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TrafficSplit, err error) {
result = &v1alpha1.TrafficSplit{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("trafficsplits").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}

View File

@@ -28,6 +28,7 @@ import (
flagger "github.com/weaveworks/flagger/pkg/client/informers/externalversions/flagger"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
istio "github.com/weaveworks/flagger/pkg/client/informers/externalversions/istio"
smi "github.com/weaveworks/flagger/pkg/client/informers/externalversions/smi"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -177,6 +178,7 @@ type SharedInformerFactory interface {
Appmesh() appmesh.Interface
Flagger() flagger.Interface
Networking() istio.Interface
Split() smi.Interface
}
func (f *sharedInformerFactory) Appmesh() appmesh.Interface {
@@ -190,3 +192,7 @@ func (f *sharedInformerFactory) Flagger() flagger.Interface {
func (f *sharedInformerFactory) Networking() istio.Interface {
return istio.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Split() smi.Interface {
return smi.New(f, f.namespace, f.tweakListOptions)
}

View File

@@ -24,6 +24,7 @@ import (
v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
v1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -70,6 +71,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case istiov1alpha3.SchemeGroupVersion.WithResource("virtualservices"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().VirtualServices().Informer()}, nil
// Group=split.smi-spec.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("trafficsplits"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Split().V1alpha1().TrafficSplits().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)

View File

@@ -0,0 +1,46 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package split
import (
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/weaveworks/flagger/pkg/client/informers/externalversions/smi/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
// group carries the shared factory state handed down to each version's informers.
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@@ -0,0 +1,45 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// TrafficSplits returns a TrafficSplitInformer.
TrafficSplits() TrafficSplitInformer
}
// version carries the shared factory state handed down to each resource informer.
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// TrafficSplits returns a TrafficSplitInformer.
func (v *version) TrafficSplits() TrafficSplitInformer {
return &trafficSplitInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

View File

@@ -0,0 +1,89 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
smiv1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
versioned "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
internalinterfaces "github.com/weaveworks/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/weaveworks/flagger/pkg/client/listers/smi/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// TrafficSplitInformer provides access to a shared informer and lister for
// TrafficSplits.
type TrafficSplitInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.TrafficSplitLister
}
// trafficSplitInformer implements TrafficSplitInformer on top of a shared factory.
type trafficSplitInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewTrafficSplitInformer constructs a new informer for TrafficSplit type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewTrafficSplitInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredTrafficSplitInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredTrafficSplitInformer constructs a new informer for TrafficSplit type.
// tweakListOptions, when non-nil, mutates the ListOptions before each list/watch call.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredTrafficSplitInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SplitV1alpha1().TrafficSplits(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SplitV1alpha1().TrafficSplits(namespace).Watch(options)
},
},
&smiv1alpha1.TrafficSplit{},
resyncPeriod,
indexers,
)
}
// defaultInformer builds the standard namespace-indexed informer used by the factory.
func (f *trafficSplitInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredTrafficSplitInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns (or lazily creates) the shared informer for TrafficSplit.
func (f *trafficSplitInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&smiv1alpha1.TrafficSplit{}, f.defaultInformer)
}
// Lister returns a lister backed by the shared informer's index.
func (f *trafficSplitInformer) Lister() v1alpha1.TrafficSplitLister {
return v1alpha1.NewTrafficSplitLister(f.Informer().GetIndexer())
}

View File

@@ -0,0 +1,27 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// TrafficSplitListerExpansion allows custom methods to be added to
// TrafficSplitLister.
type TrafficSplitListerExpansion interface{}
// TrafficSplitNamespaceListerExpansion allows custom methods to be added to
// TrafficSplitNamespaceLister.
type TrafficSplitNamespaceListerExpansion interface{}

View File

@@ -0,0 +1,94 @@
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// TrafficSplitLister helps list TrafficSplits.
type TrafficSplitLister interface {
// List lists all TrafficSplits in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.TrafficSplit, err error)
// TrafficSplits returns an object that can list and get TrafficSplits.
TrafficSplits(namespace string) TrafficSplitNamespaceLister
TrafficSplitListerExpansion
}
// trafficSplitLister implements the TrafficSplitLister interface.
type trafficSplitLister struct {
// indexer is the informer's local cache; listers never hit the API server.
indexer cache.Indexer
}
// NewTrafficSplitLister returns a new TrafficSplitLister.
func NewTrafficSplitLister(indexer cache.Indexer) TrafficSplitLister {
return &trafficSplitLister{indexer: indexer}
}
// List lists all TrafficSplits in the indexer.
func (s *trafficSplitLister) List(selector labels.Selector) (ret []*v1alpha1.TrafficSplit, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.TrafficSplit))
})
return ret, err
}
// TrafficSplits returns an object that can list and get TrafficSplits.
func (s *trafficSplitLister) TrafficSplits(namespace string) TrafficSplitNamespaceLister {
return trafficSplitNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// TrafficSplitNamespaceLister helps list and get TrafficSplits.
type TrafficSplitNamespaceLister interface {
// List lists all TrafficSplits in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.TrafficSplit, err error)
// Get retrieves the TrafficSplit from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.TrafficSplit, error)
TrafficSplitNamespaceListerExpansion
}
// trafficSplitNamespaceLister implements the TrafficSplitNamespaceLister
// interface.
type trafficSplitNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all TrafficSplits in the indexer for a given namespace.
func (s trafficSplitNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.TrafficSplit, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.TrafficSplit))
})
return ret, err
}
// Get retrieves the TrafficSplit from the indexer for a given namespace and name.
func (s trafficSplitNamespaceLister) Get(name string) (*v1alpha1.TrafficSplit, error) {
// Cache keys for namespaced objects are "<namespace>/<name>".
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("trafficsplit"), name)
}
return obj.(*v1alpha1.TrafficSplit), nil
}

View File

@@ -33,23 +33,23 @@ const controllerAgentName = "flagger"
// Controller is managing the canary objects and schedules canary deployments
type Controller struct {
kubeClient kubernetes.Interface
istioClient clientset.Interface
flaggerClient clientset.Interface
flaggerLister flaggerlisters.CanaryLister
flaggerSynced cache.InformerSynced
flaggerWindow time.Duration
workqueue workqueue.RateLimitingInterface
eventRecorder record.EventRecorder
logger *zap.SugaredLogger
canaries *sync.Map
jobs map[string]CanaryJob
deployer canary.Deployer
observer metrics.Observer
recorder metrics.Recorder
notifier *notifier.Slack
routerFactory *router.Factory
meshProvider string
kubeClient kubernetes.Interface
istioClient clientset.Interface
flaggerClient clientset.Interface
flaggerLister flaggerlisters.CanaryLister
flaggerSynced cache.InformerSynced
flaggerWindow time.Duration
workqueue workqueue.RateLimitingInterface
eventRecorder record.EventRecorder
logger *zap.SugaredLogger
canaries *sync.Map
jobs map[string]CanaryJob
deployer canary.Deployer
recorder metrics.Recorder
notifier *notifier.Slack
routerFactory *router.Factory
observerFactory *metrics.Factory
meshProvider string
}
func NewController(
@@ -62,6 +62,7 @@ func NewController(
logger *zap.SugaredLogger,
notifier *notifier.Slack,
routerFactory *router.Factory,
observerFactory *metrics.Factory,
meshProvider string,
version string,
labels []string,
@@ -92,23 +93,23 @@ func NewController(
recorder.SetInfo(version, meshProvider)
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: eventRecorder,
logger: logger,
canaries: new(sync.Map),
jobs: map[string]CanaryJob{},
flaggerWindow: flaggerWindow,
deployer: deployer,
observer: metrics.NewObserver(metricServer),
recorder: recorder,
notifier: notifier,
routerFactory: routerFactory,
meshProvider: meshProvider,
kubeClient: kubeClient,
istioClient: istioClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: eventRecorder,
logger: logger,
canaries: new(sync.Map),
jobs: map[string]CanaryJob{},
flaggerWindow: flaggerWindow,
deployer: deployer,
observerFactory: observerFactory,
recorder: recorder,
notifier: notifier,
routerFactory: routerFactory,
meshProvider: meshProvider,
}
flaggerInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

View File

@@ -37,7 +37,6 @@ type Mocks struct {
meshClient clientset.Interface
flaggerClient clientset.Interface
deployer canary.Deployer
observer metrics.Observer
ctrl *Controller
logger *zap.SugaredLogger
router router.Interface
@@ -77,7 +76,6 @@ func SetupMocks(abtest bool) Mocks {
FlaggerClient: flaggerClient,
},
}
observer := metrics.NewObserver("fake")
// init controller
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
@@ -86,21 +84,24 @@ func SetupMocks(abtest bool) Mocks {
// init router
rf := router.NewFactory(nil, kubeClient, flaggerClient, logger, flaggerClient)
// init observer
observerFactory, _ := metrics.NewFactory("fake", "istio", 5*time.Second)
ctrl := &Controller{
kubeClient: kubeClient,
istioClient: flaggerClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
observer: observer,
recorder: metrics.NewRecorder(controllerAgentName, false),
routerFactory: rf,
kubeClient: kubeClient,
istioClient: flaggerClient,
flaggerClient: flaggerClient,
flaggerLister: flaggerInformer.Lister(),
flaggerSynced: flaggerInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerAgentName),
eventRecorder: &record.FakeRecorder{},
logger: logger,
canaries: new(sync.Map),
flaggerWindow: time.Second,
deployer: deployer,
observerFactory: observerFactory,
recorder: metrics.NewRecorder(controllerAgentName, false),
routerFactory: rf,
}
ctrl.flaggerSynced = alwaysReady
@@ -108,7 +109,6 @@ func SetupMocks(abtest bool) Mocks {
return Mocks{
canary: c,
observer: observer,
deployer: deployer,
logger: logger,
flaggerClient: flaggerClient,

View File

@@ -556,126 +556,65 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
}
}
// create observer based on the mesh provider
observer := c.observerFactory.Observer()
// run metrics checks
for _, metric := range r.Spec.CanaryAnalysis.Metrics {
if metric.Interval == "" {
metric.Interval = r.GetMetricInterval()
}
// App Mesh checks
if c.meshProvider == "appmesh" {
if metric.Name == "request-success-rate" || metric.Name == "envoy_cluster_upstream_rq" {
val, err := c.observer.GetEnvoySuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
}
return false
}
if float64(metric.Threshold) > val {
c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
r.Name, r.Namespace, val, metric.Threshold)
return false
}
}
if metric.Name == "request-duration" || metric.Name == "envoy_cluster_upstream_rq_time_bucket" {
val, err := c.observer.GetEnvoyRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
return false
}
t := time.Duration(metric.Threshold) * time.Millisecond
if val > t {
c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
r.Name, r.Namespace, val, t)
return false
}
}
}
// Istio checks
if c.meshProvider == "istio" {
if metric.Name == "request-success-rate" || metric.Name == "istio_requests_total" {
val, err := c.observer.GetIstioSuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
}
return false
}
if float64(metric.Threshold) > val {
c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
r.Name, r.Namespace, val, metric.Threshold)
return false
}
}
if metric.Name == "request-duration" || metric.Name == "istio_request_duration_seconds_bucket" {
val, err := c.observer.GetIstioRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
return false
}
t := time.Duration(metric.Threshold) * time.Millisecond
if val > t {
c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
r.Name, r.Namespace, val, t)
return false
}
}
}
// NGINX checks
if c.meshProvider == "nginx" {
if metric.Name == "request-success-rate" {
val, err := c.observer.GetNginxSuccessRate(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
}
return false
}
if float64(metric.Threshold) > val {
c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
r.Name, r.Namespace, val, metric.Threshold)
return false
}
}
if metric.Name == "request-duration" {
val, err := c.observer.GetNginxRequestDuration(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
if err != nil {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
return false
}
t := time.Duration(metric.Threshold) * time.Millisecond
if val > t {
c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
r.Name, r.Namespace, val, t)
return false
}
}
}
// custom checks
if metric.Query != "" {
val, err := c.observer.GetScalar(metric.Query)
if metric.Name == "request-success-rate" {
val, err := observer.GetRequestSuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Interval)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observerFactory.Client.GetMetricsServer(), err)
}
return false
}
if float64(metric.Threshold) > val {
c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
r.Name, r.Namespace, val, metric.Threshold)
return false
}
//c.recordEventInfof(r, "Check %s passed %.2f%% > %v%%", metric.Name, val, metric.Threshold)
}
if metric.Name == "request-duration" {
val, err := observer.GetRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Interval)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observerFactory.Client.GetMetricsServer(), err)
}
return false
}
t := time.Duration(metric.Threshold) * time.Millisecond
if val > t {
c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
r.Name, r.Namespace, val, t)
return false
}
//c.recordEventInfof(r, "Check %s passed %v < %v", metric.Name, val, metric.Threshold)
}
// custom checks
if metric.Query != "" {
val, err := c.observerFactory.Client.RunQuery(metric.Query)
if err != nil {
if strings.Contains(err.Error(), "no values found") {
c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
metric.Name, r.Spec.TargetRef.Name, r.Namespace)
} else {
c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observerFactory.Client.GetMetricsServer(), err)
}
return false
}

184
pkg/metrics/client.go Normal file
View File

@@ -0,0 +1,184 @@
package metrics
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"text/template"
"time"
)
// PrometheusClient is executing promql queries
// against a single Prometheus server over its HTTP API.
type PrometheusClient struct {
// timeout bounds each HTTP request via context.WithTimeout.
timeout time.Duration
// url is the Prometheus base address; relative API paths are resolved against it.
url url.URL
}
// prometheusResponse models the subset of the /api/v1/query JSON reply
// that RunQuery needs (the instant-vector result values).
type prometheusResponse struct {
Data struct {
Result []struct {
Metric struct {
// NOTE(review): Prometheus emits the metric name under the
// "__name__" key, so this json:"name" tag likely never matches
// and Name stays empty — confirm; the field is unused by RunQuery.
Name string `json:"name"`
}
Value []interface{} `json:"value"`
}
}
}
// NewPrometheusClient creates a Prometheus client for the provided URL address.
// It returns an error when the address cannot be parsed as a URL.
func NewPrometheusClient(address string, timeout time.Duration) (*PrometheusClient, error) {
	parsed, err := url.Parse(address)
	if err != nil {
		return nil, err
	}
	client := &PrometheusClient{
		timeout: timeout,
		url:     *parsed,
	}
	return client, nil
}
// RenderQuery renders the promql query using the provided text template.
// The template may reference {{ .Name }}, {{ .Namespace }} and {{ .Interval }}.
// It returns an error when the template fails to parse or execute.
func (p *PrometheusClient) RenderQuery(name string, namespace string, interval string, tmpl string) (string, error) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}
	t, err := template.New("tmpl").Parse(tmpl)
	if err != nil {
		return "", err
	}
	// Execute straight into the buffer; the previous bufio.Writer wrapper
	// only added an extra buffering layer and a Flush failure path.
	var data bytes.Buffer
	if err := t.Execute(&data, meta); err != nil {
		return "", err
	}
	return data.String(), nil
}
// RunQuery executes the promql and converts the result to float64
// by taking the sample value of the instant-vector response.
// Returns "no values found" when the response carries no parsable sample.
func (p *PrometheusClient) RunQuery(query string) (float64, error) {
// Test hook: a client built with address "fake" short-circuits to 100.
if p.url.Host == "fake" {
return 100, nil
}
query = url.QueryEscape(p.TrimQuery(query))
// Relative path keeps any base path of p.url (e.g. behind a proxy).
u, err := url.Parse(fmt.Sprintf("./api/v1/query?query=%s", query))
if err != nil {
return 0, err
}
u = p.url.ResolveReference(u)
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return 0, err
}
// Bound the request by the client timeout.
ctx, cancel := context.WithTimeout(req.Context(), p.timeout)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return 0, err
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return 0, fmt.Errorf("error reading body: %s", err.Error())
}
if 400 <= r.StatusCode {
return 0, fmt.Errorf("error response: %s", string(b))
}
var result prometheusResponse
err = json.Unmarshal(b, &result)
if err != nil {
return 0, fmt.Errorf("error unmarshaling result: %s, '%s'", err.Error(), string(b))
}
var value *float64
// When the vector has several series, the last parsable sample wins.
for _, v := range result.Data.Result {
// NOTE(review): assumes Value always has [timestamp, value];
// a malformed reply with len(Value) < 2 would panic here — confirm.
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
value = &f
}
}
if value == nil {
return 0, fmt.Errorf("no values found")
}
return *value, nil
}
// TrimQuery takes a promql query and removes spaces, tabs and new lines
// so the multi-line query templates collapse to a single-line expression.
func (p *PrometheusClient) TrimQuery(query string) string {
	compact := strings.NewReplacer("\n", "", "\t", "", " ", "")
	return compact.Replace(query)
}
// IsOnline call Prometheus status endpoint and returns an error if the API is unreachable
// or replies with a 4xx/5xx status.
func (p *PrometheusClient) IsOnline() (bool, error) {
// /api/v1/status/flags is a cheap endpoint every Prometheus exposes.
u, err := url.Parse("./api/v1/status/flags")
if err != nil {
return false, err
}
u = p.url.ResolveReference(u)
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return false, err
}
// Bound the request by the client timeout.
ctx, cancel := context.WithTimeout(req.Context(), p.timeout)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return false, err
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return false, fmt.Errorf("error reading body: %s", err.Error())
}
if 400 <= r.StatusCode {
return false, fmt.Errorf("error response: %s", string(b))
}
return true, nil
}
// GetMetricsServer returns the Prometheus server address this client talks to.
// Used by the controller for event/error messages ("Metrics server %s query failed").
func (p *PrometheusClient) GetMetricsServer() string {
	// Bug fix: RawQuery is only the part after '?' in the configured address,
	// which is empty for a normal Prometheus URL, so error events printed an
	// empty server. The server identity is the full URL string.
	return p.url.String()
}

View File

@@ -0,0 +1,85 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestPrometheusClient_RunQuery(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
query := `
histogram_quantile(0.99,
sum(
rate(
http_request_duration_seconds_bucket{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) by (le)
)`
val, err := client.RunQuery(query)
if err != nil {
t.Fatal(err.Error())
}
if val != 100 {
t.Errorf("Got %v wanted %v", val, 100)
}
}
// TestPrometheusClient_IsOnline verifies that a 200 response from the status
// endpoint is reported as online.
func TestPrometheusClient_IsOnline(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Minimal successful /api/v1/status/flags payload.
		json := `{"status":"success","data":{"config.file":"/etc/prometheus/prometheus.yml"}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	ok, err := client.IsOnline()
	if err != nil {
		t.Fatal(err.Error())
	}
	if !ok {
		t.Errorf("Got %v wanted %v", ok, true)
	}
}
// TestPrometheusClient_IsOffline verifies that an HTTP error status from the
// status endpoint is reported as an error and a false online flag.
func TestPrometheusClient_IsOffline(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadGateway)
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	ok, err := client.IsOnline()
	if err == nil {
		t.Errorf("Got no error wanted %v", http.StatusBadGateway)
	}
	if ok {
		t.Errorf("Got %v wanted %v", ok, false)
	}
}

View File

@@ -1,119 +1,73 @@
package metrics
import (
"fmt"
"net/url"
"strconv"
"time"
)
const envoySuccessRateQuery = `
sum(rate(
envoy_cluster_upstream_rq{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
envoy_response_code!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
envoy_cluster_upstream_rq{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}
[{{ .Interval }}]))
* 100
`
func (c *Observer) GetEnvoySuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
if c.metricsServer == "fake" {
return 100, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, envoySuccessRateQuery)
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
}
return *rate, nil
// envoyQueries maps the built-in metric names to promql templates rendered
// with {{ .Name }}, {{ .Namespace }} and {{ .Interval }}.
var envoyQueries = map[string]string{
	// Percentage of non-5xx requests out of all requests.
	"request-success-rate": `
	sum(
		rate(
			envoy_cluster_upstream_rq{
				kubernetes_namespace="{{ .Namespace }}",
				kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
				envoy_response_code!~"5.*"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			envoy_cluster_upstream_rq{
				kubernetes_namespace="{{ .Namespace }}",
				kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
			}[{{ .Interval }}]
		)
	)
	* 100`,
	// 99th percentile latency from the upstream request-time histogram;
	// the observer treats the result as milliseconds.
	"request-duration": `
	histogram_quantile(
		0.99,
		sum(
			rate(
				envoy_cluster_upstream_rq_time_bucket{
					kubernetes_namespace="{{ .Namespace }}",
					kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
				}[{{ .Interval }}]
			)
		) by (le)
	)`,
}
const envoyRequestDurationQuery = `
histogram_quantile(0.99, sum(rate(
envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}
[{{ .Interval }}])) by (le))
`
// EnvoyObserver reads App Mesh (Envoy) telemetry from Prometheus.
type EnvoyObserver struct {
	// client is the shared Prometheus client used to render and run queries.
	client *PrometheusClient
}
// GetEnvoyRequestDuration returns the 99P requests delay using envoy_cluster_upstream_rq_time_bucket metrics
func (c *Observer) GetEnvoyRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
if c.metricsServer == "fake" {
return 1, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, envoyRequestDurationQuery)
func (ob *EnvoyObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, envoyQueries["request-success-rate"])
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
return value, nil
}
func (ob *EnvoyObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, envoyQueries["request-duration"])
if err != nil {
return 0, err
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
ms := time.Duration(int64(*rate)) * time.Millisecond
ms := time.Duration(int64(value)) * time.Millisecond
return ms, nil
}

View File

@@ -1,51 +1,74 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func Test_EnvoySuccessRateQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"default",
"1m",
}
func TestEnvoyObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",envoy_response_code!~"5.*"}[1m]))/sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))*100`
query, err := render(meta, envoySuccessRateQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",envoy_response_code!~"5.*"}[1m])) / sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m])) * 100`
observer := &EnvoyObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100 {
t.Errorf("Got %v wanted %v", val, 100)
}
}
func Test_EnvoyRequestDurationQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"default",
"1m",
}
func TestEnvoyObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m]))by(le))`
query, err := render(meta, envoyRequestDurationQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `histogram_quantile(0.99, sum(rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m])) by (le))`
observer := &EnvoyObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestDuration("podinfo", "default", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100*time.Millisecond {
t.Errorf("Got %v wanted %v", val, 100*time.Millisecond)
}
}

48
pkg/metrics/factory.go Normal file
View File

@@ -0,0 +1,48 @@
package metrics
import (
"strings"
"time"
)
// Factory builds metric observers for a given mesh/ingress provider, sharing
// a single Prometheus client.
type Factory struct {
	// MeshProvider selects the observer implementation (e.g. "appmesh",
	// "nginx", "gloo...", "smi:linkerd"); anything else falls back to Istio.
	MeshProvider string
	// Client is the Prometheus client handed to every observer.
	Client *PrometheusClient
}
// NewFactory creates a Prometheus client for the given metrics server and
// wraps it in an observer factory for the specified mesh provider.
func NewFactory(metricsServer string, meshProvider string, timeout time.Duration) (*Factory, error) {
	client, err := NewPrometheusClient(metricsServer, timeout)
	if err != nil {
		return nil, err
	}
	factory := Factory{
		MeshProvider: meshProvider,
		Client:       client,
	}
	return &factory, nil
}
// Observer returns the metrics observer matching the configured mesh
// provider; unknown providers default to the Istio observer.
func (factory Factory) Observer() Interface {
	provider := factory.MeshProvider
	if provider == "appmesh" {
		return &EnvoyObserver{client: factory.Client}
	}
	if provider == "nginx" {
		return &NginxObserver{client: factory.Client}
	}
	// Gloo providers are matched by prefix (e.g. "gloo", "gloo:gateway").
	if strings.HasPrefix(provider, "gloo") {
		return &GlooObserver{client: factory.Client}
	}
	if provider == "smi:linkerd" {
		return &LinkerdObserver{client: factory.Client}
	}
	return &IstioObserver{client: factory.Client}
}

72
pkg/metrics/gloo.go Normal file
View File

@@ -0,0 +1,72 @@
package metrics
import (
"time"
)
// Example of the label Gloo attaches to Envoy cluster metrics:
// envoy_cluster_name="test-podinfo-primary-9898_gloo-system"
//
// glooQueries maps the built-in metric names to promql templates rendered
// with {{ .Name }}, {{ .Namespace }} and {{ .Interval }}. Both queries match
// only the canary upstream clusters via the envoy_cluster_name regex.
// NOTE(review): the trailing comma inside the label matchers ({...,}) appears
// intentional — the tests expect it verbatim — confirm promql accepts it.
var glooQueries = map[string]string{
	// Percentage of non-5xx requests out of all canary requests.
	"request-success-rate": `
	sum(
		rate(
			envoy_cluster_upstream_rq{
				envoy_cluster_name=~"{{ .Namespace }}-{{ .Name }}-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",
				envoy_response_code!~"5.*"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			envoy_cluster_upstream_rq{
				envoy_cluster_name=~"{{ .Namespace }}-{{ .Name }}-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",
			}[{{ .Interval }}]
		)
	)
	* 100`,
	// 99th percentile latency; the observer treats the result as milliseconds.
	"request-duration": `
	histogram_quantile(
		0.99,
		sum(
			rate(
				envoy_cluster_upstream_rq_time_bucket{
					envoy_cluster_name=~"{{ .Namespace }}-{{ .Name }}-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",
				}[{{ .Interval }}]
			)
		) by (le)
	)`,
}
// GlooObserver reads Gloo (Envoy) telemetry from Prometheus.
type GlooObserver struct {
	// client is the shared Prometheus client used to render and run queries.
	client *PrometheusClient
}
// GetRequestSuccessRate returns the percentage of non-5xx requests for the
// canary upstream using the envoy_cluster_upstream_rq metric.
func (ob *GlooObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) {
	tmpl := glooQueries["request-success-rate"]
	query, err := ob.client.RenderQuery(name, namespace, interval, tmpl)
	if err != nil {
		return 0, err
	}
	return ob.client.RunQuery(query)
}
// GetRequestDuration returns the 99th percentile request latency for the
// canary upstream; the histogram value is treated as milliseconds.
func (ob *GlooObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) {
	tmpl := glooQueries["request-duration"]
	query, err := ob.client.RenderQuery(name, namespace, interval, tmpl)
	if err != nil {
		return 0, err
	}
	value, err := ob.client.RunQuery(query)
	if err != nil {
		return 0, err
	}
	return time.Duration(int64(value)) * time.Millisecond, nil
}

74
pkg/metrics/gloo_test.go Normal file
View File

@@ -0,0 +1,74 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestGlooObserver_GetRequestSuccessRate checks that the rendered promql is
// compacted as expected and the resulting value is parsed correctly.
func TestGlooObserver_GetRequestSuccessRate(t *testing.T) {
	expected := `sum(rate(envoy_cluster_upstream_rq{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",envoy_response_code!~"5.*"}[1m]))/sum(rate(envoy_cluster_upstream_rq{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",}[1m]))*100`
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assert on the exact query sent to Prometheus.
		promql := r.URL.Query()["query"][0]
		if promql != expected {
			t.Errorf("\nGot %s \nWanted %s", promql, expected)
		}
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	observer := &GlooObserver{
		client: client,
	}
	val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100 {
		t.Errorf("Got %v wanted %v", val, 100)
	}
}
// TestGlooObserver_GetRequestDuration checks the rendered latency query and
// the millisecond conversion of the returned value.
func TestGlooObserver_GetRequestDuration(t *testing.T) {
	expected := `histogram_quantile(0.99,sum(rate(envoy_cluster_upstream_rq_time_bucket{envoy_cluster_name=~"default-podinfo-canary-[0-9a-zA-Z-]+_[0-9a-zA-Z-]+",}[1m]))by(le))`
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		promql := r.URL.Query()["query"][0]
		if promql != expected {
			t.Errorf("\nGot %s \nWanted %s", promql, expected)
		}
		// 100 (milliseconds) must come back as 100*time.Millisecond.
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	observer := &GlooObserver{
		client: client,
	}
	val, err := observer.GetRequestDuration("podinfo", "default", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100*time.Millisecond {
		t.Errorf("Got %v wanted %v", val, 100*time.Millisecond)
	}
}

View File

@@ -1,123 +1,76 @@
package metrics
import (
"fmt"
"net/url"
"strconv"
"time"
)
const istioSuccessRateQuery = `
sum(rate(
istio_requests_total{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}",
response_code!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
istio_requests_total{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}"}
[{{ .Interval }}]))
* 100
`
// GetIstioSuccessRate returns the requests success rate (non 5xx) using istio_requests_total metric
func (c *Observer) GetIstioSuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
if c.metricsServer == "fake" {
return 100, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, istioSuccessRateQuery)
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
}
return *rate, nil
// istioQueries maps the built-in metric names to promql templates rendered
// with {{ .Name }}, {{ .Namespace }} and {{ .Interval }}.
var istioQueries = map[string]string{
	// Percentage of non-5xx requests reported by the destination proxy.
	"request-success-rate": `
	sum(
		rate(
			istio_requests_total{
				reporter="destination",
				destination_workload_namespace="{{ .Namespace }}",
				destination_workload=~"{{ .Name }}",
				response_code!~"5.*"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			istio_requests_total{
				reporter="destination",
				destination_workload_namespace="{{ .Namespace }}",
				destination_workload=~"{{ .Name }}"
			}[{{ .Interval }}]
		)
	)
	* 100`,
	// 99th percentile latency; buckets are in seconds, so the observer
	// multiplies the result by 1000 to obtain milliseconds.
	"request-duration": `
	histogram_quantile(
		0.99,
		sum(
			rate(
				istio_request_duration_seconds_bucket{
					reporter="destination",
					destination_workload_namespace="{{ .Namespace }}",
					destination_workload=~"{{ .Name }}"
				}[{{ .Interval }}]
			)
		) by (le)
	)`,
}
const istioRequestDurationQuery = `
histogram_quantile(0.99, sum(rate(
istio_request_duration_seconds_bucket{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}"}
[{{ .Interval }}])) by (le))
`
// IstioObserver reads Istio telemetry from Prometheus.
type IstioObserver struct {
	// client is the shared Prometheus client used to render and run queries.
	client *PrometheusClient
}
// GetIstioRequestDuration returns the 99P requests delay using istio_request_duration_seconds_bucket metrics
func (c *Observer) GetIstioRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
if c.metricsServer == "fake" {
return 1, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, istioRequestDurationQuery)
func (ob *IstioObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, istioQueries["request-success-rate"])
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
return value, nil
}
func (ob *IstioObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, istioQueries["request-duration"])
if err != nil {
return 0, err
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
ms := time.Duration(int64(*rate*1000)) * time.Millisecond
ms := time.Duration(int64(value*1000)) * time.Millisecond
return ms, nil
}

View File

@@ -1,51 +1,74 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func Test_IstioSuccessRateQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"default",
"1m",
}
func TestIstioObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo",response_code!~"5.*"}[1m]))/sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m]))*100`
query, err := render(meta, istioSuccessRateQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo",response_code!~"5.*"}[1m])) / sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m])) * 100`
observer := &IstioObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100 {
t.Errorf("Got %v wanted %v", val, 100)
}
}
func Test_IstioRequestDurationQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"default",
"1m",
}
func TestIstioObserver_GetRequestDuration(t *testing.T) {
expected := `histogram_quantile(0.99,sum(rate(istio_request_duration_seconds_bucket{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m]))by(le))`
query, err := render(meta, istioRequestDurationQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"0.100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `histogram_quantile(0.99, sum(rate(istio_request_duration_seconds_bucket{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m])) by (le))`
observer := &IstioObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestDuration("podinfo", "default", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100*time.Millisecond {
t.Errorf("Got %v wanted %v", val, 100*time.Millisecond)
}
}

76
pkg/metrics/linkerd.go Normal file
View File

@@ -0,0 +1,76 @@
package metrics
import (
"time"
)
// linkerdQueries maps the built-in metric names to promql templates rendered
// with {{ .Name }}, {{ .Namespace }} and {{ .Interval }}.
var linkerdQueries = map[string]string{
	// NOTE(review): the numerator matches classification="failure", so this
	// expression computes the *failure* percentage, not the success rate;
	// upstream Linkerd success-rate queries typically use
	// classification!="failure". The test pins the current form — confirm the
	// intent before changing it.
	"request-success-rate": `
	sum(
		rate(
			response_total{
				namespace="{{ .Namespace }}",
				deployment=~"{{ .Name }}",
				classification="failure",
				direction="inbound"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			response_total{
				namespace="{{ .Namespace }}",
				deployment=~"{{ .Name }}",
				direction="inbound"
			}[{{ .Interval }}]
		)
	)
	* 100`,
	// 99th percentile inbound latency; response_latency_ms buckets are in
	// milliseconds.
	"request-duration": `
	histogram_quantile(
		0.99,
		sum(
			rate(
				response_latency_ms_bucket{
					namespace="{{ .Namespace }}",
					deployment=~"{{ .Name }}",
					direction="inbound"
				}[{{ .Interval }}]
			)
		) by (le)
	)`,
}
// LinkerdObserver reads Linkerd telemetry from Prometheus.
type LinkerdObserver struct {
	// client is the shared Prometheus client used to render and run queries.
	client *PrometheusClient
}
// GetRequestSuccessRate runs the request-success-rate query for the workload
// and returns the resulting percentage.
func (ob *LinkerdObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) {
	tmpl := linkerdQueries["request-success-rate"]
	query, err := ob.client.RenderQuery(name, namespace, interval, tmpl)
	if err != nil {
		return 0, err
	}
	return ob.client.RunQuery(query)
}
// GetRequestDuration returns the 99th percentile inbound latency; the
// histogram value is treated as milliseconds.
func (ob *LinkerdObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) {
	tmpl := linkerdQueries["request-duration"]
	query, err := ob.client.RenderQuery(name, namespace, interval, tmpl)
	if err != nil {
		return 0, err
	}
	value, err := ob.client.RunQuery(query)
	if err != nil {
		return 0, err
	}
	return time.Duration(int64(value)) * time.Millisecond, nil
}

View File

@@ -0,0 +1,74 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestLinkerdObserver_GetRequestSuccessRate checks that the rendered promql
// is compacted as expected and the resulting value is parsed correctly.
func TestLinkerdObserver_GetRequestSuccessRate(t *testing.T) {
	expected := `sum(rate(response_total{namespace="default",deployment=~"podinfo",classification="failure",direction="inbound"}[1m]))/sum(rate(response_total{namespace="default",deployment=~"podinfo",direction="inbound"}[1m]))*100`
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assert on the exact query sent to Prometheus.
		promql := r.URL.Query()["query"][0]
		if promql != expected {
			t.Errorf("\nGot %s \nWanted %s", promql, expected)
		}
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	observer := &LinkerdObserver{
		client: client,
	}
	val, err := observer.GetRequestSuccessRate("podinfo", "default", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100 {
		t.Errorf("Got %v wanted %v", val, 100)
	}
}
// TestLinkerdObserver_GetRequestDuration checks the rendered latency query
// and the millisecond conversion of the returned value.
func TestLinkerdObserver_GetRequestDuration(t *testing.T) {
	expected := `histogram_quantile(0.99,sum(rate(response_latency_ms_bucket{namespace="default",deployment=~"podinfo",direction="inbound"}[1m]))by(le))`
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		promql := r.URL.Query()["query"][0]
		if promql != expected {
			t.Errorf("\nGot %s \nWanted %s", promql, expected)
		}
		// 100 (milliseconds) must come back as 100*time.Millisecond.
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	client, err := NewPrometheusClient(ts.URL, time.Second)
	if err != nil {
		t.Fatal(err)
	}
	observer := &LinkerdObserver{
		client: client,
	}
	val, err := observer.GetRequestDuration("podinfo", "default", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100*time.Millisecond {
		t.Errorf("Got %v wanted %v", val, 100*time.Millisecond)
	}
}

View File

@@ -1,122 +1,80 @@
package metrics
import (
"fmt"
"net/url"
"strconv"
"time"
)
const nginxSuccessRateQuery = `
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}",
status!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}
[{{ .Interval }}]))
* 100
`
// GetNginxSuccessRate returns the requests success rate (non 5xx) using nginx_ingress_controller_requests metric
func (c *Observer) GetNginxSuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
if c.metricsServer == "fake" {
return 100, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, nginxSuccessRateQuery)
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
}
return *rate, nil
// nginxQueries maps the built-in metric names to promql templates rendered
// with {{ .Name }}, {{ .Namespace }} and {{ .Interval }}.
var nginxQueries = map[string]string{
	// Percentage of non-5xx requests out of all ingress requests.
	"request-success-rate": `
	sum(
		rate(
			nginx_ingress_controller_requests{
				namespace="{{ .Namespace }}",
				ingress="{{ .Name }}",
				status!~"5.*"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			nginx_ingress_controller_requests{
				namespace="{{ .Namespace }}",
				ingress="{{ .Name }}"
			}[{{ .Interval }}]
		)
	)
	* 100`,
	// Average upstream latency (sum/count) in seconds, scaled to
	// milliseconds by the trailing * 1000.
	"request-duration": `
	sum(
		rate(
			nginx_ingress_controller_ingress_upstream_latency_seconds_sum{
				namespace="{{ .Namespace }}",
				ingress="{{ .Name }}"
			}[{{ .Interval }}]
		)
	)
	/
	sum(
		rate(
			nginx_ingress_controller_ingress_upstream_latency_seconds_count{
				namespace="{{ .Namespace }}",
				ingress="{{ .Name }}"
			}[{{ .Interval }}]
		)
	)
	* 1000`,
}
const nginxRequestDurationQuery = `
sum(rate(
nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}]))
/
sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}])) * 1000
`
// NginxObserver reads NGINX ingress controller telemetry from Prometheus.
type NginxObserver struct {
	// client is the shared Prometheus client used to render and run queries.
	client *PrometheusClient
}
// GetNginxRequestDuration returns the avg requests latency using nginx_ingress_controller_ingress_upstream_latency_seconds_sum metric
func (c *Observer) GetNginxRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
if c.metricsServer == "fake" {
return 1, nil
}
meta := struct {
Name string
Namespace string
Interval string
}{
name,
namespace,
interval,
}
query, err := render(meta, nginxRequestDurationQuery)
func (ob *NginxObserver) GetRequestSuccessRate(name string, namespace string, interval string) (float64, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, nginxQueries["request-success-rate"])
if err != nil {
return 0, err
}
var rate *float64
querySt := url.QueryEscape(query)
result, err := c.queryMetric(querySt)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
for _, v := range result.Data.Result {
metricValue := v.Value[1]
switch metricValue.(type) {
case string:
f, err := strconv.ParseFloat(metricValue.(string), 64)
if err != nil {
return 0, err
}
rate = &f
}
return value, nil
}
func (ob *NginxObserver) GetRequestDuration(name string, namespace string, interval string) (time.Duration, error) {
query, err := ob.client.RenderQuery(name, namespace, interval, nginxQueries["request-duration"])
if err != nil {
return 0, err
}
if rate == nil {
return 0, fmt.Errorf("no values found for metric %s", metric)
value, err := ob.client.RunQuery(query)
if err != nil {
return 0, err
}
ms := time.Duration(int64(*rate)) * time.Millisecond
ms := time.Duration(int64(value)) * time.Millisecond
return ms, nil
}

View File

@@ -1,51 +1,74 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func Test_NginxSuccessRateQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"nginx",
"1m",
}
func TestNginxObserver_GetRequestSuccessRate(t *testing.T) {
expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m]))/sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m]))*100`
query, err := render(meta, nginxSuccessRateQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m])) / sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m])) * 100`
observer := &NginxObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestSuccessRate("podinfo", "nginx", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100 {
t.Errorf("Got %v wanted %v", val, 100)
}
}
func Test_NginxRequestDurationQueryRender(t *testing.T) {
meta := struct {
Name string
Namespace string
Interval string
}{
"podinfo",
"nginx",
"1m",
}
func TestNginxObserver_GetRequestDuration(t *testing.T) {
expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m]))/sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m]))*1000`
query, err := render(meta, nginxRequestDurationQuery)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
promql := r.URL.Query()["query"][0]
if promql != expected {
t.Errorf("\nGot %s \nWanted %s", promql, expected)
}
json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1,"100"]}]}}`
w.Write([]byte(json))
}))
defer ts.Close()
client, err := NewPrometheusClient(ts.URL, time.Second)
if err != nil {
t.Fatal(err)
}
expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m])) /sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m])) * 1000`
observer := &NginxObserver{
client: client,
}
if query != expected {
t.Errorf("\nGot %s \nWanted %s", query, expected)
val, err := observer.GetRequestDuration("podinfo", "nginx", "1m")
if err != nil {
t.Fatal(err.Error())
}
if val != 100*time.Millisecond {
t.Errorf("Got %v wanted %v", val, 100*time.Millisecond)
}
}

View File

@@ -1,186 +1,10 @@
package metrics
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"text/template"
"time"
)
// Observer is used to query Prometheus
type Observer struct {
metricsServer string
}
type vectorQueryResponse struct {
Data struct {
Result []struct {
Metric struct {
Code string `json:"response_code"`
Name string `json:"destination_workload"`
}
Value []interface{} `json:"value"`
}
}
}
// NewObserver creates a new observer
func NewObserver(metricsServer string) Observer {
return Observer{
metricsServer: metricsServer,
}
}
// GetMetricsServer returns the Prometheus URL
func (c *Observer) GetMetricsServer() string {
return c.metricsServer
}
func (c *Observer) queryMetric(query string) (*vectorQueryResponse, error) {
promURL, err := url.Parse(c.metricsServer)
if err != nil {
return nil, err
}
u, err := url.Parse(fmt.Sprintf("./api/v1/query?query=%s", query))
if err != nil {
return nil, err
}
u = promURL.ResolveReference(u)
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("error reading body: %s", err.Error())
}
if 400 <= r.StatusCode {
return nil, fmt.Errorf("error response: %s", string(b))
}
var values vectorQueryResponse
err = json.Unmarshal(b, &values)
if err != nil {
return nil, fmt.Errorf("error unmarshaling result: %s, '%s'", err.Error(), string(b))
}
return &values, nil
}
// GetScalar runs the PromQL query and returns the value of the LAST
// vector sample in the result (the loop below overwrites value on each
// numeric sample). When the metrics server is the sentinel "fake" a
// constant 100 is returned. An error is returned if no numeric sample
// is found.
func (c *Observer) GetScalar(query string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}
	// collapse the query to a single line before escaping it
	query = strings.Replace(query, "\n", "", -1)
	// NOTE(review): this strips ALL spaces; PromQL that relies on
	// space-separated keywords (e.g. "sum by (le)") would break —
	// presumably callers only pass space-tolerant queries; confirm.
	query = strings.Replace(query, " ", "", -1)
	var value *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}
	for _, v := range result.Data.Result {
		// a sample is [timestamp, "value"]; Prometheus encodes the value as a JSON string
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			value = &f
		}
	}
	if value == nil {
		return 0, fmt.Errorf("no values found for query %s", query)
	}
	return *value, nil
}
// CheckMetricsServer calls the Prometheus /api/v1/status/flags endpoint
// and returns an error if the API is unreachable or answers with a
// 4xx/5xx status. The request is bounded by a 5 second timeout.
func CheckMetricsServer(address string) (bool, error) {
	promURL, err := url.Parse(address)
	if err != nil {
		return false, err
	}
	u, err := url.Parse("./api/v1/status/flags")
	if err != nil {
		return false, err
	}
	// resolve relative to the server URL so base paths are preserved
	u = promURL.ResolveReference(u)
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return false, err
	}
	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()
	r, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return false, err
	}
	defer r.Body.Close()
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return false, fmt.Errorf("error reading body: %s", err.Error())
	}
	// any 4xx/5xx means the server is not healthy; include the body
	if 400 <= r.StatusCode {
		return false, fmt.Errorf("error response: %s", string(b))
	}
	return true, nil
}
// render executes the Go text/template tmpl with meta as its data
// context and returns the result with every newline removed, so the
// output can be used as a single-line PromQL expression.
// NOTE: as rendered in SOURCE the function body was missing its
// closing brace before the next declaration; this restores a complete,
// behaviorally identical function.
func render(meta interface{}, tmpl string) (string, error) {
	t, err := template.New("tmpl").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var data bytes.Buffer
	b := bufio.NewWriter(&data)
	if err := t.Execute(b, meta); err != nil {
		return "", err
	}
	// flush the buffered writer before reading the underlying buffer
	if err := b.Flush(); err != nil {
		return "", err
	}
	return strings.ReplaceAll(data.String(), "\n", ""), nil
}
// Interface is implemented by mesh-specific observers and exposes the
// canary analysis metrics: request success rate (as a float) and
// request duration for a named workload over a time interval.
type Interface interface {
	GetRequestSuccessRate(name string, namespace string, interval string) (float64, error)
	GetRequestDuration(name string, namespace string, interval string) (time.Duration, error)
}

View File

@@ -1,119 +0,0 @@
package metrics
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestCanaryObserver_GetEnvoySuccessRate checks that a stubbed
// Prometheus vector response is parsed into the expected success rate.
func TestCanaryObserver_GetEnvoySuccessRate(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// canned /api/v1/query response with a single sample of 100
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	observer := NewObserver(ts.URL)
	val, err := observer.GetEnvoySuccessRate("podinfo", "default", "envoy_cluster_upstream_rq", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100 {
		t.Errorf("Got %v wanted %v", val, 100)
	}
}
// TestCanaryObserver_GetEnvoyRequestDuration checks that a stubbed
// sample of 200 is interpreted as a 200ms request duration
// (Envoy histogram buckets are in milliseconds).
func TestCanaryObserver_GetEnvoyRequestDuration(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.596,"200"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	observer := NewObserver(ts.URL)
	val, err := observer.GetEnvoyRequestDuration("podinfo", "default", "envoy_cluster_upstream_rq_time_bucket", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 200*time.Millisecond {
		t.Errorf("Got %v wanted %v", val, 200*time.Millisecond)
	}
}
// TestCanaryObserver_GetIstioSuccessRate checks that a stubbed
// Prometheus vector response yields the expected Istio success rate.
func TestCanaryObserver_GetIstioSuccessRate(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	observer := NewObserver(ts.URL)
	val, err := observer.GetIstioSuccessRate("podinfo", "default", "istio_requests_total", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 100 {
		t.Errorf("Got %v wanted %v", val, 100)
	}
}
// TestCanaryObserver_GetIstioRequestDuration checks that a stubbed
// sample of 0.2 (seconds) is converted to a 200ms duration.
func TestCanaryObserver_GetIstioRequestDuration(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.596,"0.2"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	observer := NewObserver(ts.URL)
	val, err := observer.GetIstioRequestDuration("podinfo", "default", "istio_request_duration_seconds_bucket", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}
	if val != 200*time.Millisecond {
		t.Errorf("Got %v wanted %v", val, 200*time.Millisecond)
	}
}
// TestCheckMetricsServer verifies that a healthy status endpoint
// makes CheckMetricsServer report the server as reachable.
func TestCheckMetricsServer(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// canned /api/v1/status/flags response
		json := `{"status":"success","data":{"config.file":"/etc/prometheus/prometheus.yml"}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()
	ok, err := CheckMetricsServer(ts.URL)
	if err != nil {
		t.Fatal(err.Error())
	}
	if !ok {
		t.Errorf("Got %v wanted %v", ok, true)
	}
}
// TestCheckMetricsServer_Offline verifies that a 502 from the status
// endpoint is reported as an error and a false result.
func TestCheckMetricsServer_Offline(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadGateway)
	}))
	defer ts.Close()
	ok, err := CheckMetricsServer(ts.URL)
	if err == nil {
		t.Errorf("Got no error wanted %v", http.StatusBadGateway)
	}
	if ok {
		t.Errorf("Got %v wanted %v", ok, false)
	}
}

View File

@@ -56,12 +56,27 @@ func (factory *Factory) MeshRouter(provider string) Interface {
kubeClient: factory.kubeClient,
appmeshClient: factory.meshClient,
}
case strings.HasPrefix(provider, "smi:"):
mesh := strings.TrimPrefix(provider, "smi:")
return &SmiRouter{
logger: factory.logger,
flaggerClient: factory.flaggerClient,
kubeClient: factory.kubeClient,
smiClient: factory.meshClient,
targetMesh: mesh,
}
case strings.HasPrefix(provider, "supergloo"):
supergloo, err := NewSuperglooRouter(context.TODO(), provider, factory.flaggerClient, factory.logger, factory.kubeConfig)
if err != nil {
panic("failed creating supergloo client")
}
return supergloo
case strings.HasPrefix(provider, "gloo"):
gloo, err := NewGlooRouter(context.TODO(), provider, factory.flaggerClient, factory.logger, factory.kubeConfig)
if err != nil {
panic("failed creating gloo client")
}
return gloo
default:
return &IstioRouter{
logger: factory.logger,

187
pkg/router/gloo.go Normal file
View File

@@ -0,0 +1,187 @@
package router
import (
"context"
"fmt"
"strings"
solokitclients "github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/clients/factory"
"github.com/solo-io/solo-kit/pkg/api/v1/clients/kube"
crdv1 "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube/crd/solo.io/v1"
solokitcore "github.com/solo-io/solo-kit/pkg/api/v1/resources/core"
solokiterror "github.com/solo-io/solo-kit/pkg/errors"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
)
// GlooRouter is managing Gloo UpstreamGroup resources used to split
// traffic between the primary and canary upstreams.
// (The previous comment said "Istio virtual services" — a copy-paste
// leftover; this router only touches Gloo custom resources.)
type GlooRouter struct {
	ugClient            gloov1.UpstreamGroupClient // client for Gloo UpstreamGroup CRDs
	logger              *zap.SugaredLogger
	upstreamDiscoveryNs string // namespace holding discovered Upstreams; defaults to "gloo-system"
}
// NewGlooRouter creates a GlooRouter backed by a Kubernetes CRD client
// for Gloo UpstreamGroup resources. The provider string may carry the
// upstream discovery namespace as a suffix ("gloo:<namespace>");
// without the suffix the default namespace is used.
// NOTE(review): flaggerClient is accepted but not referenced here —
// presumably kept for signature parity with other routers; confirm.
func NewGlooRouter(ctx context.Context, provider string, flaggerClient clientset.Interface, logger *zap.SugaredLogger, cfg *rest.Config) (*GlooRouter, error) {
	// TODO if cfg is nil use memory client instead?
	sharedCache := kube.NewKubeCache(ctx)
	upstreamGroupClient, err := gloov1.NewUpstreamGroupClient(&factory.KubeResourceClientFactory{
		Crd:             gloov1.UpstreamGroupCrd,
		Cfg:             cfg,
		SharedCache:     sharedCache,
		SkipCrdCreation: true,
	})
	if err != nil {
		// this should never happen.
		return nil, fmt.Errorf("creating UpstreamGroup client %v", err)
	}
	if err := upstreamGroupClient.Register(); err != nil {
		return nil, err
	}
	// extract the discovery namespace from a "gloo:<ns>" provider string
	upstreamDiscoveryNs := ""
	if strings.HasPrefix(provider, "gloo:") {
		upstreamDiscoveryNs = strings.TrimPrefix(provider, "gloo:")
	}
	return NewGlooRouterWithClient(ctx, upstreamGroupClient, upstreamDiscoveryNs, logger), nil
}
// NewGlooRouterWithClient builds a GlooRouter around an existing
// UpstreamGroup client; an empty upstreamDiscoveryNs falls back to the
// default "gloo-system" namespace.
func NewGlooRouterWithClient(ctx context.Context, routingRuleClient gloov1.UpstreamGroupClient, upstreamDiscoveryNs string, logger *zap.SugaredLogger) *GlooRouter {
	ns := upstreamDiscoveryNs
	if ns == "" {
		ns = "gloo-system"
	}
	return &GlooRouter{
		ugClient:            routingRuleClient,
		logger:              logger,
		upstreamDiscoveryNs: ns,
	}
}
// Reconcile creates the Gloo upstream group for the canary if it does
// not exist yet, seeding all traffic on the primary; existing routes
// are left untouched.
// (The previous comment mentioned "Istio virtual service" — copy-paste
// leftover; this method manages a Gloo UpstreamGroup.)
func (gr *GlooRouter) Reconcile(canary *flaggerv1.Canary) error {
	// do we have routes already?
	if _, _, err := gr.GetRoutes(canary); err == nil {
		// we have routes, no need to do anything else
		return nil
	} else if solokiterror.IsNotExist(err) {
		// no group yet: initialize with 100% primary / 0% canary
		return gr.SetRoutes(canary, 100, 0)
	} else {
		return err
	}
}
// GetRoutes returns the traffic weights currently assigned to the
// primary and canary upstreams in the canary's UpstreamGroup.
// An error is returned when the group is missing, or when neither a
// primary nor a canary destination carries a non-zero weight.
func (gr *GlooRouter) GetRoutes(canary *flaggerv1.Canary) (
	primaryWeight int,
	canaryWeight int,
	err error,
) {
	targetName := canary.Spec.TargetRef.Name
	var ug *gloov1.UpstreamGroup
	ug, err = gr.ugClient.Read(canary.Namespace, targetName, solokitclients.ReadOpts{})
	if err != nil {
		return
	}
	dests := ug.GetDestinations()
	for _, dest := range dests {
		// match destinations by the discovery-generated upstream name
		if dest.GetDestination().GetUpstream().Name == upstreamName(canary.Namespace, fmt.Sprintf("%s-primary", targetName), canary.Spec.Service.Port) {
			primaryWeight = int(dest.Weight)
		}
		if dest.GetDestination().GetUpstream().Name == upstreamName(canary.Namespace, fmt.Sprintf("%s-canary", targetName), canary.Spec.Service.Port) {
			canaryWeight = int(dest.Weight)
		}
	}
	if primaryWeight == 0 && canaryWeight == 0 {
		err = fmt.Errorf("RoutingRule %s.%s does not contain routes for %s-primary and %s-canary",
			targetName, canary.Namespace, targetName, targetName)
	}
	return
}
// SetRoutes updates the destinations weight for primary and canary by
// rebuilding the UpstreamGroup with two weighted destinations pointing
// at the discovered "-primary" and "-canary" upstreams.
// Setting both weights to zero is rejected as invalid.
func (gr *GlooRouter) SetRoutes(
	canary *flaggerv1.Canary,
	primaryWeight int,
	canaryWeight int,
) error {
	targetName := canary.Spec.TargetRef.Name
	if primaryWeight == 0 && canaryWeight == 0 {
		return fmt.Errorf("RoutingRule %s.%s update failed: no valid weights", targetName, canary.Namespace)
	}
	destinations := []*gloov1.WeightedDestination{}
	// primary destination
	destinations = append(destinations, &gloov1.WeightedDestination{
		Destination: &gloov1.Destination{
			Upstream: solokitcore.ResourceRef{
				Name:      upstreamName(canary.Namespace, fmt.Sprintf("%s-primary", targetName), canary.Spec.Service.Port),
				Namespace: gr.upstreamDiscoveryNs,
			},
		},
		Weight: uint32(primaryWeight),
	})
	// canary destination
	destinations = append(destinations, &gloov1.WeightedDestination{
		Destination: &gloov1.Destination{
			Upstream: solokitcore.ResourceRef{
				Name:      upstreamName(canary.Namespace, fmt.Sprintf("%s-canary", targetName), canary.Spec.Service.Port),
				Namespace: gr.upstreamDiscoveryNs,
			},
		},
		Weight: uint32(canaryWeight),
	})
	upstreamGroup := &gloov1.UpstreamGroup{
		Metadata: solokitcore.Metadata{
			Name:      canary.Spec.TargetRef.Name,
			Namespace: canary.Namespace,
		},
		Destinations: destinations,
	}
	return gr.writeUpstreamGroupRuleForCanary(canary, upstreamGroup)
}
// writeUpstreamGroupRuleForCanary persists the UpstreamGroup, creating
// it when absent or overwriting it when its spec differs from the
// existing one. The written resource is owned by the Canary via an
// owner reference so it is garbage-collected with the canary.
func (gr *GlooRouter) writeUpstreamGroupRuleForCanary(canary *flaggerv1.Canary, ug *gloov1.UpstreamGroup) error {
	targetName := canary.Spec.TargetRef.Name
	if oldUg, err := gr.ugClient.Read(ug.Metadata.Namespace, ug.Metadata.Name, solokitclients.ReadOpts{}); err != nil {
		if solokiterror.IsNotExist(err) {
			gr.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
				Infof("UpstreamGroup %s created", ug.Metadata.Name)
		} else {
			return fmt.Errorf("RoutingRule %s.%s read failed: %v", targetName, canary.Namespace, err)
		}
	} else {
		// carry over the resource version so the write is an update, not a conflict
		ug.Metadata.ResourceVersion = oldUg.Metadata.ResourceVersion
		// if the old and the new one are equal, no need to do anything.
		// (status is cleared so only the spec participates in the comparison)
		oldUg.Status = solokitcore.Status{}
		if oldUg.Equal(ug) {
			return nil
		}
	}
	kubeWriteOpts := &kube.KubeWriteOpts{
		PreWriteCallback: func(r *crdv1.Resource) {
			// mark the Canary as the controller/owner of the UpstreamGroup
			r.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
				*metav1.NewControllerRef(canary, schema.GroupVersionKind{
					Group:   flaggerv1.SchemeGroupVersion.Group,
					Version: flaggerv1.SchemeGroupVersion.Version,
					Kind:    flaggerv1.CanaryKind,
				}),
			}
		},
	}
	writeOpts := solokitclients.WriteOpts{OverwriteExisting: true, StorageWriteOpts: kubeWriteOpts}
	_, err := gr.ugClient.Write(ug, writeOpts)
	if err != nil {
		return fmt.Errorf("UpstreamGroup %s.%s update failed: %v", targetName, canary.Namespace, err)
	}
	return nil
}

142
pkg/router/gloo_test.go Normal file
View File

@@ -0,0 +1,142 @@
package router
import (
"context"
"fmt"
"testing"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
solokitclients "github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/clients/factory"
solokitmemory "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory"
)
// TestGlooRouter_Sync verifies that reconciling a canary creates an
// UpstreamGroup with two destinations: primary at weight 100 and
// canary at weight 0.
// Fix: the canary-weight error message previously printed
// dests[0].Weight (the primary) instead of dests[1].Weight, and both
// messages had broken grammar ("should is").
func TestGlooRouter_Sync(t *testing.T) {
	mocks := setupfakeClients()
	upstreamGroupClient, err := gloov1.NewUpstreamGroupClient(&factory.MemoryResourceClientFactory{
		Cache: solokitmemory.NewInMemoryResourceCache(),
	})
	if err != nil {
		t.Fatal(err.Error())
	}
	if err := upstreamGroupClient.Register(); err != nil {
		t.Fatal(err.Error())
	}
	router := NewGlooRouterWithClient(context.TODO(), upstreamGroupClient, "gloo-system", mocks.logger)
	err = router.Reconcile(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
	// test insert
	ug, err := upstreamGroupClient.Read("default", "podinfo", solokitclients.ReadOpts{})
	if err != nil {
		t.Fatal(err.Error())
	}
	dests := ug.GetDestinations()
	if len(dests) != 2 {
		t.Errorf("Got Destinations %v wanted %v", len(dests), 2)
	}
	if dests[0].Weight != 100 {
		t.Errorf("Primary weight is %v wanted 100", dests[0].Weight)
	}
	if dests[1].Weight != 0 {
		t.Errorf("Canary weight is %v wanted 0", dests[1].Weight)
	}
}
// TestGlooRouter_SetRoutes verifies that a 50/50 weight update is
// persisted on both the primary and canary destinations of the
// UpstreamGroup.
func TestGlooRouter_SetRoutes(t *testing.T) {
	mocks := setupfakeClients()
	upstreamGroupClient, err := gloov1.NewUpstreamGroupClient(&factory.MemoryResourceClientFactory{
		Cache: solokitmemory.NewInMemoryResourceCache(),
	})
	if err != nil {
		t.Fatal(err.Error())
	}
	if err := upstreamGroupClient.Register(); err != nil {
		t.Fatal(err.Error())
	}
	router := NewGlooRouterWithClient(context.TODO(), upstreamGroupClient, "gloo-system", mocks.logger)
	err = router.Reconcile(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
	// GetRoutes is called only to prove the group is readable; the
	// returned weights are then deliberately overwritten below
	p, c, err := router.GetRoutes(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
	p = 50
	c = 50
	err = router.SetRoutes(mocks.canary, p, c)
	if err != nil {
		t.Fatal(err.Error())
	}
	ug, err := upstreamGroupClient.Read("default", "podinfo", solokitclients.ReadOpts{})
	if err != nil {
		t.Fatal(err.Error())
	}
	var pRoute *gloov1.WeightedDestination
	var cRoute *gloov1.WeightedDestination
	targetName := mocks.canary.Spec.TargetRef.Name
	// locate the primary and canary destinations by upstream name
	for _, dest := range ug.GetDestinations() {
		if dest.GetDestination().GetUpstream().Name == upstreamName(mocks.canary.Namespace, fmt.Sprintf("%s-primary", targetName), mocks.canary.Spec.Service.Port) {
			pRoute = dest
		}
		if dest.GetDestination().GetUpstream().Name == upstreamName(mocks.canary.Namespace, fmt.Sprintf("%s-canary", targetName), mocks.canary.Spec.Service.Port) {
			cRoute = dest
		}
	}
	if pRoute.Weight != uint32(p) {
		t.Errorf("Got primary weight %v wanted %v", pRoute.Weight, p)
	}
	if cRoute.Weight != uint32(c) {
		t.Errorf("Got canary weight %v wanted %v", cRoute.Weight, c)
	}
}
// TestGlooRouter_GetRoutes verifies that a freshly reconciled canary
// reports the initial 100/0 primary/canary weight split.
func TestGlooRouter_GetRoutes(t *testing.T) {
	mocks := setupfakeClients()
	upstreamGroupClient, err := gloov1.NewUpstreamGroupClient(&factory.MemoryResourceClientFactory{
		Cache: solokitmemory.NewInMemoryResourceCache(),
	})
	if err != nil {
		t.Fatal(err.Error())
	}
	if err := upstreamGroupClient.Register(); err != nil {
		t.Fatal(err.Error())
	}
	router := NewGlooRouterWithClient(context.TODO(), upstreamGroupClient, "gloo-system", mocks.logger)
	err = router.Reconcile(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
	p, c, err := router.GetRoutes(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}
	if p != 100 {
		t.Errorf("Got primary weight %v wanted %v", p, 100)
	}
	if c != 0 {
		t.Errorf("Got canary weight %v wanted %v", c, 0)
	}
}

190
pkg/router/smi.go Normal file
View File

@@ -0,0 +1,190 @@
package router
import (
"encoding/json"
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
smiv1 "github.com/weaveworks/flagger/pkg/apis/smi/v1alpha1"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
)
// SmiRouter shifts traffic using SMI TrafficSplit custom resources.
type SmiRouter struct {
	kubeClient    kubernetes.Interface // NOTE(review): not referenced in this file — confirm whether needed
	flaggerClient clientset.Interface
	smiClient     clientset.Interface // client used to read/write TrafficSplits
	logger        *zap.SugaredLogger
	targetMesh    string // mesh behind the SMI adapter, e.g. "istio"; drives annotations
}
// Reconcile creates the SMI TrafficSplit for the canary if it does not
// exist (seeded with 100% primary / 0% canary) or updates it when the
// desired spec drifts from the cluster state. Backend weights are
// excluded from the drift comparison (cmpopts.IgnoreTypes on
// resource.Quantity) so analysis-driven weight changes are not reverted.
func (sr *SmiRouter) Reconcile(canary *flaggerv1.Canary) error {
	targetName := canary.Spec.TargetRef.Name
	canaryName := fmt.Sprintf("%s-canary", targetName)
	primaryName := fmt.Sprintf("%s-primary", targetName)
	// the split's apex service: first custom host if any, else the target name
	var host string
	if len(canary.Spec.Service.Hosts) > 0 {
		host = canary.Spec.Service.Hosts[0]
	} else {
		host = targetName
	}
	tsSpec := smiv1.TrafficSplitSpec{
		Service: host,
		Backends: []smiv1.TrafficSplitBackend{
			{
				Service: canaryName,
				Weight:  resource.NewQuantity(0, resource.DecimalExponent),
			},
			{
				Service: primaryName,
				Weight:  resource.NewQuantity(100, resource.DecimalExponent),
			},
		},
	}
	ts, err := sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Get(targetName, metav1.GetOptions{})
	// create traffic split
	if errors.IsNotFound(err) {
		t := &smiv1.TrafficSplit{
			ObjectMeta: metav1.ObjectMeta{
				Name:      targetName,
				Namespace: canary.Namespace,
				// owned by the Canary so it is garbage-collected with it
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(canary, schema.GroupVersionKind{
						Group:   flaggerv1.SchemeGroupVersion.Group,
						Version: flaggerv1.SchemeGroupVersion.Version,
						Kind:    flaggerv1.CanaryKind,
					}),
				},
				Annotations: sr.makeAnnotations(canary.Spec.Service.Gateways),
			},
			Spec: tsSpec,
		}
		_, err := sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Create(t)
		if err != nil {
			return err
		}
		sr.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
			Infof("TrafficSplit %s.%s created", t.GetName(), canary.Namespace)
		return nil
	}
	if err != nil {
		return fmt.Errorf("traffic split %s query error %v", targetName, err)
	}
	// update traffic split
	if diff := cmp.Diff(tsSpec, ts.Spec, cmpopts.IgnoreTypes(resource.Quantity{})); diff != "" {
		tsClone := ts.DeepCopy()
		tsClone.Spec = tsSpec
		_, err := sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Update(tsClone)
		if err != nil {
			return fmt.Errorf("TrafficSplit %s update error %v", targetName, err)
		}
		sr.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
			Infof("TrafficSplit %s.%s updated", targetName, canary.Namespace)
		return nil
	}
	return nil
}
// GetRoutes returns the weights currently assigned to the primary and
// canary backends of the canary's TrafficSplit. An error is returned
// when the split is missing, unreadable, or carries zero weight on
// both backends.
func (sr *SmiRouter) GetRoutes(canary *flaggerv1.Canary) (
	primaryWeight int,
	canaryWeight int,
	err error,
) {
	targetName := canary.Spec.TargetRef.Name
	canaryName := fmt.Sprintf("%s-canary", targetName)
	primaryName := fmt.Sprintf("%s-primary", targetName)
	ts, err := sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			err = fmt.Errorf("TrafficSplit %s.%s not found", targetName, canary.Namespace)
			return
		}
		err = fmt.Errorf("TrafficSplit %s.%s query error %v", targetName, canary.Namespace, err)
		return
	}
	for _, r := range ts.Spec.Backends {
		// backend weights are resource.Quantity values; convert to int64
		w, _ := r.Weight.AsInt64()
		if r.Service == primaryName {
			primaryWeight = int(w)
		}
		if r.Service == canaryName {
			canaryWeight = int(w)
		}
	}
	if primaryWeight == 0 && canaryWeight == 0 {
		err = fmt.Errorf("TrafficSplit %s.%s does not contain routes for %s and %s",
			targetName, canary.Namespace, primaryName, canaryName)
	}
	return
}
// SetRoutes updates the destinations weight for primary and canary by
// replacing the TrafficSplit backends with the given weights.
func (sr *SmiRouter) SetRoutes(
	canary *flaggerv1.Canary,
	primaryWeight int,
	canaryWeight int,
) error {
	targetName := canary.Spec.TargetRef.Name
	canaryName := fmt.Sprintf("%s-canary", targetName)
	primaryName := fmt.Sprintf("%s-primary", targetName)
	ts, err := sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("TrafficSplit %s.%s not found", targetName, canary.Namespace)
		}
		return fmt.Errorf("TrafficSplit %s.%s query error %v", targetName, canary.Namespace, err)
	}
	backends := []smiv1.TrafficSplitBackend{
		{
			Service: canaryName,
			Weight:  resource.NewQuantity(int64(canaryWeight), resource.DecimalExponent),
		},
		{
			Service: primaryName,
			Weight:  resource.NewQuantity(int64(primaryWeight), resource.DecimalExponent),
		},
	}
	// update a deep copy so the informer cache object is not mutated
	tsClone := ts.DeepCopy()
	tsClone.Spec.Backends = backends
	_, err = sr.smiClient.SplitV1alpha1().TrafficSplits(canary.Namespace).Update(tsClone)
	if err != nil {
		return fmt.Errorf("TrafficSplit %s update error %v", targetName, err)
	}
	return nil
}
// makeAnnotations builds the TrafficSplit annotations. When the target
// mesh is Istio and the canary declares gateways, the gateway list is
// serialized to JSON under the SMI adapter's VirtualService gateway
// annotation key; otherwise an empty map is returned.
func (sr *SmiRouter) makeAnnotations(gateways []string) map[string]string {
	annotations := map[string]string{}
	if len(gateways) > 0 && sr.targetMesh == "istio" {
		// marshaling a []string cannot fail; the error is discarded
		encoded, _ := json.Marshal(gateways)
		annotations["VirtualService.v1alpha3.networking.istio.io/spec.gateways"] = string(encoded)
	}
	return annotations
}

View File

@@ -1,4 +1,4 @@
package version
var VERSION = "0.13.2"
var VERSION = "0.14.0"
var REVISION = "unknown"

27
test/e2e-gloo-build.sh Executable file
View File

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Builds the Flagger image, loads it into the local Kind cluster and
# installs the Flagger Helm chart (with Prometheus) in gloo-system.
# Fixes: the kind-load step was mislabeled with a duplicate
# '>>> Installing Flagger' echo; path expansions are now quoted (SC2086).
set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Building Flagger'
cd "${REPO_ROOT}" && docker build -t test/flagger:latest . -f Dockerfile

echo '>>> Loading Flagger image into Kind'
kind load docker-image test/flagger:latest

echo '>>> Installing Flagger'
helm upgrade -i flagger "${REPO_ROOT}/charts/flagger" \
--wait \
--namespace gloo-system \
--set prometheus.install=true \
--set meshProvider=gloo

# Give flagger permissions for gloo objects
kubectl create clusterrolebinding flagger-gloo --clusterrole=gloo-role-gateway --serviceaccount=gloo-system:flagger

kubectl -n gloo-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n gloo-system rollout status deployment/flagger
kubectl -n gloo-system rollout status deployment/flagger-prometheus

97
test/e2e-gloo-tests.sh Executable file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env bash
# This script runs e2e tests for Canary initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Helm and the Gloo gateway
# (installed by test/e2e-gloo.sh; Flagger by test/e2e-gloo-build.sh)
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Creating test namespace'
kubectl create namespace test
echo ">>> Downloading Gloo CLI"
curl -SsL https://github.com/solo-io/gloo/releases/download/v0.13.29/glooctl-linux-amd64 > glooctl
chmod +x glooctl
echo '>>> Installing load tester'
kubectl -n test apply -f ${REPO_ROOT}/artifacts/loadtester/
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
# route all gateway traffic to the "podinfo" upstream group that Flagger manages
./glooctl add route --path-prefix / --upstream-group-name podinfo --upstream-group-namespace test
# canary definition: 15s analysis interval, max 30% traffic in 10% steps,
# success rate must stay >= 99%; a load-test webhook drives traffic
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 15s
    threshold: 15
    maxWeight: 30
    stepWeight: 10
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://gateway-proxy.gloo-system"
          logCmdOutput: "true"
EOF
echo '>>> Waiting for primary to be ready'
# poll the canary status up to retries*5s; dump Flagger logs on timeout
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n gloo-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.1
echo '>>> Waiting for canary promotion'
# promotion is detected when the primary deployment runs the new image tag
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
    sleep 10
    kubectl -n gloo-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n gloo-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary promotion test passed'

25
test/e2e-gloo.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Installs Helm/Tiller and the Gloo gateway (v0.13.29) into the Kind
# cluster used by the Gloo end-to-end tests.
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo ">>> Installing Helm"
# NOTE(review): pipes a remote script straight into bash — acceptable
# for a throwaway CI cluster, not for production machines
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
echo '>>> Installing Tiller'
# Tiller gets cluster-admin: fine for an ephemeral test cluster only
kubectl --namespace kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller --upgrade --wait
helm repo add gloo https://storage.googleapis.com/solo-public-helm
echo '>>> Installing Gloo'
# NodePort so the gateway is reachable from inside the Kind node
helm upgrade -i gloo gloo/gloo --version 0.13.29 \
--wait \
--namespace gloo-system \
--set gatewayProxies.gateway-proxy.service.type=NodePort
kubectl -n gloo-system rollout status deployment/gloo
kubectl -n gloo-system rollout status deployment/gateway-proxy
kubectl -n gloo-system get all

26
test/e2e-smi-istio-build.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Building Flagger'
cd ${REPO_ROOT} && docker build -t test/flagger:latest . -f Dockerfile
kind load docker-image test/flagger:latest
echo '>>> Installing Flagger'
helm upgrade -i flagger ${REPO_ROOT}/charts/flagger \
--wait \
--namespace istio-system \
--set meshProvider=smi:istio
kubectl -n istio-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n istio-system rollout status deployment/flagger
echo '>>> Installing the SMI Istio adapter'
kubectl apply -f ${REPO_ROOT}/artifacts/smi/istio-adapter.yaml
kubectl -n istio-system rollout status deployment/smi-adapter-istio