Compare commits — 88 commits

Commit SHAs in this range (author and date columns did not survive in this view):

```text
1140af8dc7  a2688c3910  75b27ab3f3  59d3f55fb2  f34739f334  90c71ec18f  395234d7c8  e322ba0065
6db8b96f72  44d7e96e96  1662479c8d  2e351fcf0d  5d81876d07  c81e6989ec  4d61a896c3  d148933ab3
04a56a3591  4a354e74d4  1e3e6427d5  38826108c8  4c4752f907  94dcd6c94d  eabef3db30  6750f10ffa
56cb888cbf  b3e7fb3417  2c6e1baca2  c8358929d1  1dc7677dfb  8e699a7543  cbbabdfac0  9d92de234c
ba65975fb5  ef423b2078  f451b4e36c  0856e13ee6  87b9fa8ca7  5b43d3d314  ac4972dd8d  8a8f68af5d
c669dc0c4b  863a5466cc  e2347c84e3  e0e673f565  30cbf2a741  f58de3801c  7c6b88d4c1  0c0ebaecd5
1925f99118  6f2a22a1cc  ee04082cd7  efd901ac3a  e565789ae8  d3953004f6  df1d9e3011  631c55fa6e
29cdd43288  9b79af9fcd  2c9c1adb47  5dfb5808c4  bb0175aebf  adaf4c99c0  bed6ed09d5  4ff67a85ce
702f4fcd14  8a03ae153d  434c6149ab  97fc4a90ae  217ef06930  71057946e6  a74ad52c72  12d26874f8
27de9ce151  9e7cd5a8c5  38cb487b64  05ca266c5e  5cc26de645  2b9a195fa3  4454749eec  b435a03fab
7c166e2b40  f7a7963dcf  9c77c0d69c  e8a9555346  59751dd007  9c4d4d16b6  0e3d1b3e8f  f119b78940
```
**.circleci/config.yml** (new file, +16)

```yaml
version: 2.1

jobs:
  e2e-testing:
    machine: true
    steps:
      - checkout
      - run: test/e2e-kind.sh
      - run: test/e2e-istio.sh
      - run: test/e2e-build.sh
      - run: test/e2e-tests.sh

workflows:
  version: 2
  build-and-test:
    jobs:
      - e2e-testing
```
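The CI job defers all of the heavy lifting to the repo's shell scripts, whose contents are not part of this diff. As a rough sketch only — assuming `test/e2e-kind.sh` bootstraps a local cluster the same way the commented-out Travis steps below do — it might look like:

```bash
#!/usr/bin/env bash
# Hypothetical sketch of test/e2e-kind.sh; the real script is not shown in this diff.
set -o errexit

# Fetch the kind binary (same install step as the commented Travis config below).
go get -u sigs.k8s.io/kind

# Boot a single-node Kubernetes cluster inside Docker.
kind create cluster

# Point kubectl at the new cluster and sanity-check it.
export KUBECONFIG="$(kind get kubeconfig-path)"
kubectl get nodes
```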
**.gitignore** (+3)

```diff
@@ -11,3 +11,6 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 .DS_Store
+
+bin/
+artifacts/gcloud/
```
**.travis.yml** (+5)

```diff
@@ -12,12 +12,17 @@ addons:
   packages:
     - docker-ce
 
+#before_script:
+#  - go get -u sigs.k8s.io/kind
+#  - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
+#  - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
+
 script:
   - set -e
   - make test-fmt
   - make test-codegen
   - go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
   - make build
 
 after_success:
   - if [ -z "$DOCKER_USER" ]; then
```
**CHANGELOG.md** (new file, +163)

```markdown
# Changelog

All notable changes to this project are documented in this file.

## 0.7.0 (2019-02-28)

Adds support for custom metric checks, HTTP timeouts and HTTP retries

#### Features

- Allow custom promql queries in the canary analysis spec [#60](https://github.com/stefanprodan/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/stefanprodan/flagger/pull/62)

## 0.6.0 (2019-02-25)

Allows for [HTTPMatchRequests](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPMatchRequest)
and [HTTPRewrite](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPRewrite)
to be customized in the service spec of the canary custom resource.

#### Features

- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/stefanprodan/flagger/pull/55)
- Update virtual service when the canary service spec changes
  [#54](https://github.com/stefanprodan/flagger/pull/54)
  [#51](https://github.com/stefanprodan/flagger/pull/51)

#### Improvements

- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
  [#53](https://github.com/stefanprodan/flagger/pull/53)

## 0.5.1 (2019-02-14)

Allows skipping the analysis phase to ship changes directly to production

#### Features

- Add option to skip the canary analysis [#46](https://github.com/stefanprodan/flagger/pull/46)

#### Fixes

- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/stefanprodan/flagger/pull/43)

## 0.5.0 (2019-01-30)

Track changes in ConfigMaps and Secrets [#37](https://github.com/stefanprodan/flagger/pull/37)

#### Features

- Promote configmaps and secrets changes from canary to primary
- Detect changes in configmaps and/or secrets and (re)start canary analysis
- Add configs checksum to Canary CRD status
- Create primary configmaps and secrets at bootstrap
- Scan canary volumes and containers for configmaps and secrets

#### Fixes

- Copy deployment labels from canary to primary at bootstrap and promotion

## 0.4.1 (2019-01-24)

Load testing webhook [#35](https://github.com/stefanprodan/flagger/pull/35)

#### Features

- Add the load tester chart to Flagger Helm repository
- Implement a load test runner based on [rakyll/hey](https://github.com/rakyll/hey)
- Log warning when no values are found for Istio metric due to lack of traffic

#### Fixes

- Run webhooks before the metrics checks to avoid failures when using a load tester

## 0.4.0 (2019-01-18)

Restart canary analysis if revision changes [#31](https://github.com/stefanprodan/flagger/pull/31)

#### Breaking changes

- Drop support for Kubernetes 1.10

#### Features

- Detect changes during canary analysis and reset advancement
- Add status and additional printer columns to CRD
- Add canary name and namespace to controller structured logs

#### Fixes

- Allow canary name to be different from the target name
- Check if multiple canaries have the same target and log error
- Use deep copy when updating Kubernetes objects
- Skip readiness checks if canary analysis has finished

## 0.3.0 (2019-01-11)

Configurable canary analysis duration [#20](https://github.com/stefanprodan/flagger/pull/20)

#### Breaking changes

- Helm chart: flag `controlLoopInterval` has been removed

#### Features

- CRD: canaries.flagger.app v1alpha3
- Schedule canary analysis independently based on `canaryAnalysis.interval`
- Add analysis interval to Canary CRD (defaults to one minute)
- Make autoscaler (HPA) reference optional

## 0.2.0 (2019-01-04)

Webhooks [#18](https://github.com/stefanprodan/flagger/pull/18)

#### Features

- CRD: canaries.flagger.app v1alpha2
- Implement canary external checks based on webhooks HTTP POST calls
- Add webhooks to Canary CRD
- Move docs to gitbook [docs.flagger.app](https://docs.flagger.app)

## 0.1.2 (2018-12-06)

Improve Slack notifications [#14](https://github.com/stefanprodan/flagger/pull/14)

#### Features

- Add canary analysis metadata to init and start Slack messages
- Add rollback reason to failed canary Slack messages

## 0.1.1 (2018-11-28)

Canary progress deadline [#10](https://github.com/stefanprodan/flagger/pull/10)

#### Features

- Rollback canary based on the deployment progress deadline check
- Add progress deadline to Canary CRD (defaults to 10 minutes)

## 0.1.0 (2018-11-25)

First stable release

#### Features

- CRD: canaries.flagger.app v1alpha1
- Notifications: post canary events to Slack
- Instrumentation: expose Prometheus metrics for canary status and traffic weight percentage
- Autoscaling: add HPA reference to CRD and create primary HPA at bootstrap
- Bootstrap: create primary deployment, ClusterIP services and Istio virtual service based on CRD spec

## 0.0.1 (2018-10-07)

Initial semver release

#### Features

- Implement canary rollback based on failed checks threshold
- Scale up the deployment when canary revision changes
- Add OpenAPI v3 schema validation to Canary CRD
- Use CRD status for canary state persistence
- Add Helm charts for Flagger and Grafana
- Add canary analysis Grafana dashboard
```
**Gopkg.lock** (generated, 13 changed lines)

```diff
@@ -163,12 +163,9 @@
   revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
 
 [[projects]]
-  digest = "1:03a74b0d86021c8269b52b7c908eb9bb3852ff590b363dad0a807cf58cec2f89"
+  digest = "1:05ddd9088c0cfb8eaa3adf3626977caa6d96b3959a3bd8c91fef932fd1696c34"
   name = "github.com/knative/pkg"
   packages = [
     "apis",
     "apis/duck",
     "apis/duck/v1alpha1",
     "apis/istio",
     "apis/istio/authentication",
     "apis/istio/authentication/v1alpha1",
@@ -179,14 +176,12 @@
     "client/clientset/versioned/scheme",
     "client/clientset/versioned/typed/authentication/v1alpha1",
     "client/clientset/versioned/typed/authentication/v1alpha1/fake",
     "client/clientset/versioned/typed/duck/v1alpha1",
     "client/clientset/versioned/typed/duck/v1alpha1/fake",
     "client/clientset/versioned/typed/istio/v1alpha3",
     "client/clientset/versioned/typed/istio/v1alpha3/fake",
     "signals",
   ]
   pruneopts = "NUT"
-  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
+  revision = "f9612ef73847258e381e749c4f45b0f5e03b66e9"
 
 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -476,10 +471,9 @@
   version = "kubernetes-1.11.0"
 
 [[projects]]
-  digest = "1:4b0d523ee389c762d02febbcfa0734c4530ebe87abe925db18f05422adcb33e8"
+  digest = "1:83b01e3d6f85c4e911de84febd69a2d3ece614c5a4a518fbc2b5d59000645980"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/equality",
     "pkg/api/errors",
     "pkg/api/meta",
     "pkg/api/resource",
@@ -693,6 +687,7 @@
     "github.com/knative/pkg/client/clientset/versioned",
     "github.com/knative/pkg/client/clientset/versioned/fake",
     "github.com/knative/pkg/signals",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
     "go.uber.org/zap",
     "go.uber.org/zap/zapcore",
```

**Gopkg.toml**

```diff
@@ -47,7 +47,7 @@ required = [
 
 [[constraint]]
   name = "github.com/knative/pkg"
-  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
+  revision = "f9612ef73847258e381e749c4f45b0f5e03b66e9"
 
 [[override]]
   name = "github.com/golang/glog"
```
**Makefile** (4 changed lines)

```diff
@@ -7,7 +7,7 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }'
 
 run:
 	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
-	-metrics-server=https://prometheus.iowa.weavedx.com \
+	-metrics-server=https://prometheus.istio.weavedx.com \
 	-slack-url=https://hooks.slack.com/services/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 \
 	-slack-channel="devops-alerts"
 
@@ -31,7 +31,7 @@ test: test-fmt test-codegen
 	go test ./...
 
 helm-package:
-	cd charts/ && helm package flagger/ && helm package grafana/ && helm package loadtester/
+	cd charts/ && helm package ./*
 	mv charts/*.tgz docs/
 	helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
```
**README.md** (407 changed lines)

```diff
@@ -8,9 +8,38 @@
 Flagger is a Kubernetes operator that automates the promotion of canary deployments
 using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
-The canary analysis can be extended with webhooks for running integration tests,
+The canary analysis can be extended with webhooks for running acceptance tests,
 load tests or any other custom validation.
 
 Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
 indicators like HTTP requests success rate, requests average duration and pods health.
 Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
 
+### Documentation
+
+Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)
+
+* Install
+  * [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
+  * [Flagger install on GKE](https://docs.flagger.app/install/flagger-install-on-google-cloud)
+* How it works
+  * [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
+  * [Virtual Service](https://docs.flagger.app/how-it-works#virtual-service)
+  * [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
+  * [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
+  * [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
+  * [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
+  * [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
+  * [Load testing](https://docs.flagger.app/how-it-works#load-testing)
+* Usage
+  * [Canary promotions and rollbacks](https://docs.flagger.app/usage/progressive-delivery)
+  * [Monitoring](https://docs.flagger.app/usage/monitoring)
+  * [Alerting](https://docs.flagger.app/usage/alerting)
+* Tutorials
+  * [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)
 
 ### Install
 
 Before installing Flagger make sure you have Istio set up with Prometheus enabled.
```
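The helm commands in the hunk below are mostly elided from this view; a typical invocation, assembled from the hunk header and the chart's default `metricsServer` value that appears later in this diff, would be:

```bash
# Add the Flagger Helm repository.
helm repo add flagger https://flagger.app

# Install or upgrade Flagger next to the Istio control plane, pointing it
# at the Prometheus instance that stores the mesh telemetry.
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system.svc.cluster.local:9090
```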
```diff
@@ -30,50 +59,15 @@ helm upgrade -i flagger flagger/flagger \
 
 Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
 
-### Usage
+### Canary CRD
 
-Flagger takes a Kubernetes deployment and creates a series of objects
-(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
-ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
-Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
-to drive the canary analysis and promotion.
+Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
+then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio virtual services).
+These objects expose the application on the mesh and drive the canary analysis and promotion.
+
+Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
+When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.
 
 Gated canary promotion stages:
 
 * scan for canary deployments
 * check Istio virtual service routes are mapped to primary and canary ClusterIP services
 * check primary and canary deployments status
   * halt advancement if a rolling update is underway
   * halt advancement if pods are unhealthy
 * increase canary traffic weight percentage from 0% to 5% (step weight)
 * call webhooks and check results
 * check canary HTTP request success rate and latency
   * halt advancement if any metric is under the specified threshold
   * increment the failed checks counter
 * check if the number of failed checks reached the threshold
   * route all traffic to primary
   * scale to zero the canary deployment and mark it as failed
   * wait for the canary deployment to be updated and start over
 * increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
   * halt advancement while canary request success rate is under the threshold
   * halt advancement while canary request duration P99 is over the threshold
   * halt advancement if the primary or canary deployment becomes unhealthy
   * halt advancement while canary deployment is being scaled up/down by HPA
 * promote canary to primary
   * copy ConfigMaps and Secrets from canary to primary
   * copy canary deployment spec template over primary
 * wait for primary rolling update to finish
   * halt advancement if pods are unhealthy
 * route all traffic to primary
 * scale to zero the canary deployment
 * mark rollout as finished
 * wait for the canary deployment to be updated and start over
```
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

```diff
@@ -105,6 +99,21 @@ spec:
     # Istio virtual service host names (optional)
     hosts:
     - podinfo.example.com
+    # HTTP match conditions (optional)
+    match:
+    - uri:
+        prefix: /
+    # HTTP rewrite (optional)
+    rewrite:
+      uri: /
+    # timeout for HTTP requests (optional)
+    timeout: 5s
+    # retry policy when a HTTP request fails (optional)
+    retries:
+      attempts: 3
+  # promote the canary without analysing it (default false)
+  skipAnalysis: false
   # define the canary analysis timing and KPIs
   canaryAnalysis:
     # schedule interval (default 60s)
     interval: 1m
@@ -118,6 +127,7 @@ spec:
     stepWeight: 5
     # Istio Prometheus checks
     metrics:
+    # builtin Istio checks
    - name: istio_requests_total
       # minimum req success rate (non 5xx responses)
       # percentage (0-100)
@@ -128,6 +138,16 @@ spec:
       # milliseconds
       threshold: 500
       interval: 30s
+    # custom check
+    - name: "kafka lag"
+      threshold: 100
+      query: |
+        avg_over_time(
+          kafka_consumergroup_lag{
+            consumergroup=~"podinfo-consumer-.*",
+            topic="podinfo"
+          }[1m]
+        )
     # external checks (optional)
     webhooks:
     - name: load-test
@@ -137,314 +157,13 @@ spec:
       cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
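As a worked example of the timing these values imply: with `stepWeight: 5` and the 50% max weight mentioned in the promotion stages above, a promotion needs 50 / 5 = 10 successful iterations, so at `interval: 1m` a healthy canary spends at least 10 minutes in analysis before being promoted, while a failing one is rolled back as soon as the failed-checks counter reaches the analysis threshold.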
The canary analysis uses the following promql queries:

_HTTP requests success rate percentage_

```sql
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"$namespace",
          destination_workload=~"$workload",
          response_code!~"5.*"
        }[$interval]
    )
)
/
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"$namespace",
          destination_workload=~"$workload"
        }[$interval]
    )
)
```

_HTTP requests milliseconds duration P99_

```sql
histogram_quantile(0.99,
  sum(
    irate(
      istio_request_duration_seconds_bucket{
        reporter="destination",
        destination_workload=~"$workload",
        destination_workload_namespace=~"$namespace"
      }[$interval]
    )
  ) by (le)
)
```
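To make the templating concrete, here is the success rate query as Flagger would render it for the walkthrough below, substituting namespace `test`, workload `podinfo` and a `1m` interval:

```sql
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"test",
          destination_workload=~"podinfo",
          response_code!~"5.*"
        }[1m]
    )
)
/
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"test",
          destination_workload=~"podinfo"
        }[1m]
    )
)
```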
The canary analysis can be extended with webhooks.
Flagger will call the webhooks (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.

Webhook payload:

```json
{
    "name": "podinfo",
    "namespace": "test",
    "metadata": {
        "test": "all",
        "token": "16688eb5e9f289f1991c"
    }
}
```
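You can emulate the call Flagger makes in order to test a webhook receiver by hand; a sketch with curl, where the URL is a placeholder for your own endpoint (any non-2xx status would count as a failed check):

```bash
# Simulate the HTTP POST Flagger sends to a webhook endpoint
# (http://my-webhook.test:8080/check is hypothetical).
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST http://my-webhook.test:8080/check \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"test": "all"}}'
```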
### Automated canary analysis, promotions and rollbacks

Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```

Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):

```bash
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
```

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.0
```

**Note** that Flagger tracks changes in the deployment `PodSpec` but also in `ConfigMaps` and `Secrets`
that are referenced in the pod's volumes and containers environment variables.
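A configuration-only change can therefore start an analysis too. As an illustrative sketch — `podinfo-config` is a hypothetical ConfigMap name, not one defined in this repository:

```bash
# Changing a ConfigMap referenced by the deployment also triggers
# a new canary analysis (podinfo-config is hypothetical).
kubectl -n test create configmap podinfo-config \
  --from-literal=color=blue \
  --dry-run -o yaml | kubectl apply -f -
```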
Flagger detects that the deployment revision changed and starts a new canary analysis:

```
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:         0
  Failed Checks:         0
  Last Transition Time:  2019-01-16T13:47:16Z
  Phase:                 Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   5        2019-01-16T14:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Create a tester pod and exec into it:

```bash
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```

Generate HTTP 500 errors:

```bash
watch curl http://podinfo-canary:9898/status/500
```

Generate latency:

```bash
watch curl http://podinfo-canary:9898/delay/1
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:         0
  Failed Checks:         10
  Last Transition Time:  2019-01-16T13:47:16Z
  Phase:                 Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
### Monitoring

Flagger comes with a Grafana dashboard made for canary analysis.

Install Grafana with Helm:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```

The dashboard shows the RED and USE metrics for the primary and canary workloads.

The canary errors and latency spikes have been recorded as Kubernetes events and logged by Flagger in json format:

```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```

Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:

```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1

# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1

# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5

# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
### Alerting

Flagger can be configured to send Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```

Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected and if the canary analysis failed or succeeded.

A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis
reaches the maximum number of failed checks.

Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment failed:

```yaml
- alert: canary_rollback
  expr: flagger_canary_status > 1
  for: 1m
  labels:
    severity: warning
  annotations:
    summary: "Canary failed"
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```

For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/how-it-works).

### Roadmap

* Extend the validation mechanism to support other metrics than HTTP success rate and latency
* Add A/B testing capabilities using fixed routing based on HTTP headers and cookies match conditions
* Integrate with other service mesh technologies like AWS AppMesh and Linkerd v2
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two
* Extend the canary analysis and promotion to other types than Kubernetes deployments such as Flux Helm releases or OpenFaaS functions

### Contributing
```diff
@@ -25,7 +25,17 @@ spec:
     - public-gateway.istio-system.svc.cluster.local
     # Istio virtual service host names (optional)
     hosts:
-    - app.iowa.weavedx.com
+    - app.istio.weavedx.com
+    # Istio virtual service HTTP match conditions (optional)
+    match:
+    - uri:
+        prefix: /
+    # Istio virtual service HTTP rewrite (optional)
+    rewrite:
+      uri: /
+  # for emergency cases when you want to ship changes
+  # in production without analysing the canary
+  skipAnalysis: false
   canaryAnalysis:
     # schedule interval (default 60s)
     interval: 10s
```
```diff
@@ -25,7 +25,7 @@ spec:
     spec:
       containers:
       - name: podinfod
-        image: quay.io/stefanprodan/podinfo:1.3.0
+        image: quay.io/stefanprodan/podinfo:1.4.0
         imagePullPolicy: IfNotPresent
         ports:
         - containerPort: 9898
```
**artifacts/cluster/namespaces/test.yaml** (new file, +6)

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: test
  labels:
    istio-injection: enabled
```
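After applying the manifest, the injection label can be verified with (output columns indicative):

```bash
kubectl get namespace test --show-labels
# NAME   STATUS   AGE   LABELS
# test   Active   1m    istio-injection=enabled
```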
**artifacts/cluster/releases/test/backend.yaml** (new file, +26)

```yaml
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: backend
  namespace: test
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: regexp:^1.4.*
spec:
  releaseName: backend
  chart:
    repository: https://flagger.app/
    name: podinfo
    version: 2.0.0
  values:
    image:
      repository: quay.io/stefanprodan/podinfo
      tag: 1.4.0
    httpServer:
      timeout: 30s
    canary:
      enabled: true
      istioIngress:
        enabled: false
      loadtest:
        enabled: true
```
**artifacts/cluster/releases/test/frontend.yaml** (new file, +27)

```yaml
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: frontend
  namespace: test
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: semver:~1.4
spec:
  releaseName: frontend
  chart:
    repository: https://flagger.app/
    name: podinfo
    version: 2.0.0
  values:
    image:
      repository: quay.io/stefanprodan/podinfo
      tag: 1.4.0
    backend: http://backend-podinfo:9898/echo
    canary:
      enabled: true
      istioIngress:
        enabled: true
        gateway: public-gateway.istio-system.svc.cluster.local
        host: frontend.istio.example.com
      loadtest:
        enabled: true
```
**artifacts/cluster/releases/test/loadtester.yaml** (new file, +18)

```yaml
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: loadtester
  namespace: test
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: glob:0.*
spec:
  releaseName: flagger-loadtester
  chart:
    repository: https://flagger.app/
    name: loadtester
    version: 0.1.0
  values:
    image:
      repository: quay.io/stefanprodan/flagger-loadtester
      tag: 0.1.0
```
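Note the three different Flux automation filters used across these releases: `regexp:^1.4.*` pins the backend to image tags matching a regular expression, `semver:~1.4` lets the frontend follow 1.4.x releases, and `glob:0.*` tracks the load tester by wildcard. Any tag that passes its filter is rolled out by Flux and, with `canary.enabled: true`, analysed by Flagger before promotion.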
```diff
@@ -73,6 +73,10 @@ spec:
           properties:
             port:
               type: number
+            timeout:
+              type: string
+          skipAnalysis:
+            type: boolean
           canaryAnalysis:
             properties:
               interval:
@@ -89,7 +93,7 @@
               properties:
                 items:
                   type: object
-                  required: ['name', 'interval', 'threshold']
+                  required: ['name', 'threshold']
                   properties:
                     name:
                       type: string
@@ -98,6 +102,8 @@
                       pattern: "^[0-9]+(m|s)"
                    threshold:
                       type: number
+                    query:
+                      type: string
               webhooks:
                 type: array
                 properties:
```
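The relaxed schema above (`interval` no longer required, `query` now allowed) is what lets the 0.7.0 custom promql checks validate; for instance, the kafka-lag metric from the README earlier in this diff:

```yaml
metrics:
- name: "kafka lag"
  threshold: 100
  query: |
    avg_over_time(
      kafka_consumergroup_lag{
        consumergroup=~"podinfo-consumer-.*",
        topic="podinfo"
      }[1m]
    )
```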
```diff
@@ -22,8 +22,8 @@
       serviceAccountName: flagger
       containers:
       - name: flagger
-        image: quay.io/stefanprodan/flagger:0.5.0
-        imagePullPolicy: Always
+        image: quay.io/stefanprodan/flagger:0.7.0
+        imagePullPolicy: IfNotPresent
         ports:
         - name: http
           containerPort: 8080
```
**artifacts/gke/istio-gateway.yaml** (new file, +27)

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*"
      tls:
        mode: SIMPLE
        privateKey: /etc/istio/ingressgateway-certs/tls.key
        serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
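The HTTPS server expects key material under /etc/istio/ingressgateway-certs. With Istio 1.0 that path is conventionally populated from a secret named `istio-ingressgateway-certs` in istio-system — an assumption worth verifying against your mesh version:

```bash
# Create the TLS secret the ingress gateway mounts at /etc/istio/ingressgateway-certs.
kubectl -n istio-system create secret tls istio-ingressgateway-certs \
  --key=privkey.pem \
  --cert=fullchain.pem
```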
**artifacts/gke/istio-prometheus.yaml** (new file, +443)

```yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
  labels:
    app: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - services
  - endpoints
  - pods
  - nodes/proxy
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
  labels:
    app: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: istio-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: istio-system
  labels:
    app: prometheus
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus
  namespace: istio-system
  labels:
    app: prometheus
data:
  prometheus.yml: |-
    global:
      scrape_interval: 15s
    scrape_configs:

    - job_name: 'istio-mesh'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s

      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - istio-system

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: istio-telemetry;prometheus

    # Scrape config for envoy stats
    - job_name: 'envoy-stats'
      metrics_path: /stats/prometheus
      kubernetes_sd_configs:
      - role: pod

      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_container_port_name]
        action: keep
        regex: '.*-envoy-prom'
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:15090
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod_name

      metric_relabel_configs:
      # Exclude some of the envoy metrics that have massive cardinality
      # This list may need to be pruned further moving forward, as informed
      # by performance and scalability testing.
      - source_labels: [ cluster_name ]
        regex: '(outbound|inbound|prometheus_stats).*'
        action: drop
      - source_labels: [ tcp_prefix ]
        regex: '(outbound|inbound|prometheus_stats).*'
        action: drop
      - source_labels: [ listener_address ]
        regex: '(.+)'
        action: drop
      - source_labels: [ http_conn_manager_listener_prefix ]
        regex: '(.+)'
        action: drop
      - source_labels: [ http_conn_manager_prefix ]
        regex: '(.+)'
        action: drop
      - source_labels: [ __name__ ]
        regex: 'envoy_tls.*'
        action: drop
      - source_labels: [ __name__ ]
        regex: 'envoy_tcp_downstream.*'
        action: drop
      - source_labels: [ __name__ ]
        regex: 'envoy_http_(stats|admin).*'
        action: drop
      - source_labels: [ __name__ ]
        regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
        action: drop

    - job_name: 'istio-policy'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s
      # metrics_path defaults to '/metrics'
      # scheme defaults to 'http'.

      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - istio-system

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: istio-policy;http-monitoring

    - job_name: 'istio-telemetry'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s
      # metrics_path defaults to '/metrics'
      # scheme defaults to 'http'.

      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - istio-system

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: istio-telemetry;http-monitoring

    - job_name: 'pilot'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s
      # metrics_path defaults to '/metrics'
      # scheme defaults to 'http'.

      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - istio-system

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: istio-pilot;http-monitoring

    - job_name: 'galley'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s
      # metrics_path defaults to '/metrics'
      # scheme defaults to 'http'.

      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - istio-system

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: istio-galley;http-monitoring

    # scrape config for API servers
    - job_name: 'kubernetes-apiservers'
      kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names:
          - default
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: kubernetes;https

    # scrape config for nodes (kubelet)
    - job_name: 'kubernetes-nodes'
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics

    # Scrape config for Kubelet cAdvisor.
    #
    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
    # (those whose names begin with 'container_') have been removed from the
    # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
    # retrieve those metrics.
    #
    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
    # the --cadvisor-port=0 Kubelet flag).
    #
    # This job is not necessary and should be removed in Kubernetes 1.6 and
    # earlier versions, or it will cause the metrics to be scraped twice.
    - job_name: 'kubernetes-cadvisor'
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

    # scrape config for service endpoints.
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    - job_name: 'kubernetes-pods'
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:  # If first two labels are present, pod should be scraped by the istio-secure job.
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status]
        action: drop
        regex: (.+)
      - source_labels: [__meta_kubernetes_pod_annotation_istio_mtls]
        action: drop
        regex: (true)
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod_name

    - job_name: 'kubernetes-pods-istio-secure'
      scheme: https
      tls_config:
        ca_file: /etc/istio-certs/root-cert.pem
        cert_file: /etc/istio-certs/cert-chain.pem
        key_file: /etc/istio-certs/key.pem
        insecure_skip_verify: true  # prometheus does not support secure naming.
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      # sidecar status annotation is added by sidecar injector and
      # istio_workload_mtls_ability can be specifically placed on a pod to indicate its ability to receive mtls traffic.
      - source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status, __meta_kubernetes_pod_annotation_istio_mtls]
        action: keep
        regex: (([^;]+);([^;]*))|(([^;]*);(true))
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__]  # Only keep address that is host:port
        action: keep  # otherwise an extra target with ':443' is added for https scheme
        regex: ([^:]+):(\d+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod_name
---
# Source: istio/charts/prometheus/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: istio-system
  annotations:
    prometheus.io/scrape: 'true'
  labels:
    name: prometheus
spec:
  selector:
    app: prometheus
  ports:
  - name: http-prometheus
    protocol: TCP
    port: 9090
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: istio-system
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
      annotations:
        sidecar.istio.io/inject: "false"
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      serviceAccountName: prometheus
      containers:
      - name: prometheus
        image: "docker.io/prom/prometheus:v2.7.1"
        imagePullPolicy: IfNotPresent
        args:
        - '--storage.tsdb.retention=6h'
        - '--config.file=/etc/prometheus/prometheus.yml'
        ports:
        - containerPort: 9090
          name: http
        livenessProbe:
          httpGet:
            path: /-/healthy
            port: 9090
        readinessProbe:
          httpGet:
            path: /-/ready
            port: 9090
        resources:
          requests:
            cpu: 10m
        volumeMounts:
        - name: config-volume
          mountPath: /etc/prometheus
        - mountPath: /etc/istio-certs
          name: istio-certs
      volumes:
      - name: config-volume
        configMap:
          name: prometheus
      - name: istio-certs
        secret:
          defaultMode: 420
          optional: true
          secretName: istio.default
```
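To inspect what this Prometheus instance is scraping, one option is a local port-forward against the service defined above:

```bash
# Forward the in-cluster Prometheus UI to localhost:9090.
kubectl -n istio-system port-forward svc/prometheus 9090:9090
```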
**artifacts/routing/destination-rule.yaml** (new file, +45)

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
spec:
  gateways:
    - public-gateway.istio-system.svc.cluster.local
    - mesh
  hosts:
    - podinfo.istio.weavedx.com
    - podinfo
  http:
    - route:
        - destination:
            host: podinfo
            subset: primary
          weight: 50
        - destination:
            host: podinfo
            subset: canary
          weight: 50
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: podinfo-destination
  namespace: test
spec:
  host: podinfo
  trafficPolicy:
    loadBalancer:
      consistentHash:
        httpCookie:
          name: istiouser
          ttl: 30s
  subsets:
    - name: primary
      labels:
        app: podinfo
        role: primary
    - name: canary
      labels:
        app: podinfo
        role: canary
```
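With consistent hashing on the `istiouser` cookie, a given caller keeps hitting the same subset despite the 50/50 weights. A quick way to observe this, assuming the gateway host above resolves to your ingress IP:

```bash
# Requests carrying the same istiouser cookie hash to the same subset,
# so repeated calls should keep returning the same version.
for i in 1 2 3 4 5; do
  curl -s -b 'istiouser=alice' http://podinfo.istio.weavedx.com/
done
```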
```diff
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 0.5.0
-appVersion: 0.5.0
+version: 0.7.0
+appVersion: 0.7.0
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
```
```diff
@@ -74,6 +74,10 @@ spec:
           properties:
             port:
              type: number
+            timeout:
+              type: string
+          skipAnalysis:
+            type: boolean
           canaryAnalysis:
             properties:
               interval:
@@ -90,7 +94,7 @@
               properties:
                 items:
                   type: object
-                  required: ['name', 'interval', 'threshold']
+                  required: ['name', 'threshold']
                   properties:
                     name:
                       type: string
@@ -99,6 +103,8 @@
                       pattern: "^[0-9]+(m|s)"
                     threshold:
                       type: number
+                    query:
+                      type: string
               webhooks:
                 type: array
                 properties:
```
```diff
@@ -2,7 +2,7 @@
 
 image:
   repository: quay.io/stefanprodan/flagger
-  tag: 0.5.0
+  tag: 0.7.0
   pullPolicy: IfNotPresent
 
 metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
```
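Upgrading an existing install to the new tag can be done with Helm while keeping previously-set values; a sketch:

```bash
# Bump only the Flagger image tag, preserving the other release values.
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --reuse-values \
  --set image.tag=0.7.0
```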
```diff
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: grafana
-version: 0.1.0
-appVersion: 5.4.2
+version: 1.0.0
+appVersion: 5.4.3
 description: Grafana dashboards for monitoring Flagger canary deployments
 icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
 home: https://flagger.app
```
@@ -2,7 +2,6 @@
"annotations": {
"list": [
{
"$$hashKey": "object:1587",
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
@@ -16,8 +15,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1534587617141,
"id": 1,
"iteration": 1549736611069,
"links": [],
"panels": [
{
@@ -179,7 +178,6 @@
"tableColumn": "",
"targets": [
{
"$$hashKey": "object:2857",
"expr": "sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$primary\",response_code!~\"5.*\"}[30s])) / sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$primary\"}[30s]))",
"format": "time_series",
"intervalFactor": 1,
@@ -344,7 +342,6 @@
"tableColumn": "",
"targets": [
{
"$$hashKey": "object:2810",
"expr": "sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$canary\",response_code!~\"5.*\"}[30s])) / sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$canary\"}[30s]))",
"format": "time_series",
"intervalFactor": 1,
@@ -363,7 +360,7 @@
"value": "null"
}
],
"valueName": "avg"
"valueName": "current"
},
{
"aliasColors": {},
@@ -432,6 +429,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Request Duration",
"tooltip": {
@@ -464,7 +462,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -533,6 +535,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Request Duration",
"tooltip": {
@@ -565,7 +568,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>USE: $canary.$namespace</span>\n</div>",
@@ -623,7 +630,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -634,6 +640,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: CPU Usage by Pod",
"tooltip": {
@@ -651,7 +658,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"format": "s",
"label": "CPU seconds / second",
"logBase": 1,
@@ -660,7 +666,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -668,7 +673,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -711,7 +720,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -722,6 +730,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: CPU Usage by Pod",
"tooltip": {
@@ -739,7 +748,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"format": "s",
"label": "CPU seconds / second",
"logBase": 1,
@@ -748,7 +756,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -756,7 +763,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -799,7 +810,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -811,6 +821,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Memory Usage by Pod",
"tooltip": {
@@ -828,7 +839,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "bytes",
"label": "",
@@ -838,7 +848,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -846,7 +855,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -889,7 +902,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -901,6 +913,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Memory Usage by Pod",
"tooltip": {
@@ -918,7 +931,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "bytes",
"label": "",
@@ -928,7 +940,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -936,7 +947,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -975,12 +990,10 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:3641",
"alias": "received",
"color": "#f9d9f9"
},
{
"$$hashKey": "object:3649",
"alias": "transmited",
"color": "#f29191"
}
@@ -990,7 +1003,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:2598",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
@@ -998,7 +1010,6 @@
"refId": "A"
},
{
"$$hashKey": "object:3245",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
@@ -1008,6 +1019,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Network I/O",
"tooltip": {
@@ -1025,7 +1037,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "Bps",
"label": "",
@@ -1035,7 +1046,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -1043,7 +1053,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1082,12 +1096,10 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:3641",
"alias": "received",
"color": "#f9d9f9"
},
{
"$$hashKey": "object:3649",
"alias": "transmited",
"color": "#f29191"
}
@@ -1097,7 +1109,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:2598",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
@@ -1105,7 +1116,6 @@
"refId": "A"
},
{
"$$hashKey": "object:3245",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
@@ -1115,6 +1125,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Network I/O",
"tooltip": {
@@ -1132,7 +1143,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "Bps",
"label": "",
@@ -1142,7 +1152,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -1150,7 +1159,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>IN/OUTBOUND: $canary.$namespace</span>\n</div>",
@@ -1205,7 +1218,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1953",
"expr": "round(sum(irate(istio_requests_total{connection_security_policy=\"mutual_tls\", destination_workload_namespace=~\"$namespace\", destination_workload=~\"$primary\", reporter=\"destination\"}[30s])) by (source_workload, source_workload_namespace, response_code), 0.001)",
"format": "time_series",
"hide": false,
@@ -1215,7 +1227,6 @@
"step": 2
},
{
"$$hashKey": "object:1954",
"expr": "round(sum(irate(istio_requests_total{connection_security_policy!=\"mutual_tls\", destination_workload_namespace=~\"$namespace\", destination_workload=~\"$primary\", reporter=\"destination\"}[30s])) by (source_workload, source_workload_namespace, response_code), 0.001)",
"format": "time_series",
"hide": false,
@@ -1227,6 +1238,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Incoming Requests by Source And Response Code",
"tooltip": {
@@ -1246,7 +1258,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1999",
"format": "ops",
"label": null,
"logBase": 1,
@@ -1255,7 +1266,6 @@
"show": true
},
{
"$$hashKey": "object:2000",
"format": "short",
"label": null,
"logBase": 1,
@@ -1263,7 +1273,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1323,6 +1337,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Incoming Requests by Source And Response Code",
"tooltip": {
@@ -1357,7 +1372,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1416,6 +1435,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Outgoing Requests by Destination And Response Code",
"tooltip": {
@@ -1450,7 +1470,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1509,6 +1533,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Outgoing Requests by Destination And Response Code",
"tooltip": {
@@ -1543,7 +1568,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "10s",
@@ -1555,10 +1584,12 @@
{
"allValue": null,
"current": {
"text": "demo",
"value": "demo"
"selected": true,
"text": "test",
"value": "test"
},
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Namespace",
@@ -1568,6 +1599,7 @@
"query": "query_result(sum(istio_requests_total) by (destination_workload_namespace) or sum(istio_tcp_sent_bytes_total) by (destination_workload_namespace))",
"refresh": 1,
"regex": "/.*_namespace=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
@@ -1578,10 +1610,12 @@
{
"allValue": null,
"current": {
"text": "primary",
"value": "primary"
"selected": false,
"text": "backend-primary",
"value": "backend-primary"
},
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Primary",
@@ -1591,6 +1625,7 @@
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
@@ -1601,10 +1636,12 @@
{
"allValue": null,
"current": {
"text": "canary",
"value": "canary"
"selected": true,
"text": "backend",
"value": "backend"
},
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Canary",
@@ -1614,6 +1651,7 @@
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
@@ -1653,7 +1691,7 @@
]
},
"timezone": "",
"title": "Canary analysis",
"title": "Flagger canary",
"uid": "RdykD7tiz",
"version": 2
}
"version": 3
}
@@ -38,12 +38,21 @@ spec:
        #   path: /
        #   port: http
        env:
        - name: GF_PATHS_PROVISIONING
          value: /etc/grafana/provisioning/
        {{- if .Values.password }}
        - name: GF_SECURITY_ADMIN_USER
          value: {{ .Values.user }}
        - name: GF_SECURITY_ADMIN_PASSWORD
          value: {{ .Values.password }}
        - name: GF_PATHS_PROVISIONING
          value: /etc/grafana/provisioning/
        {{- else }}
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        {{- end }}
        volumeMounts:
        - name: grafana
          mountPath: /var/lib/grafana
@@ -6,7 +6,7 @@ replicaCount: 1

image:
  repository: grafana/grafana
  tag: 5.4.2
  tag: 5.4.3
  pullPolicy: IfNotPresent

service:
@@ -28,7 +28,7 @@ tolerations: []
affinity: {}

user: admin
password: admin
password:

# Istio Prometheus instance
url: http://prometheus:9090
21
charts/podinfo/.helmignore
Normal file
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
12
charts/podinfo/Chart.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
version: 2.0.0
appVersion: 1.4.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://github.com/stefanprodan/flagger
maintainers:
- email: stefanprodan@users.noreply.github.com
  name: stefanprodan
sources:
- https://github.com/stefanprodan/flagger
79
charts/podinfo/README.md
Normal file
@@ -0,0 +1,79 @@
# Podinfo

Podinfo is a tiny web application made with Go
that showcases best practices of running canary deployments with Flagger and Istio.

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `frontend`:

```console
helm upgrade -i frontend flagger/podinfo \
  --namespace test \
  --set nameOverride=frontend \
  --set backend=http://backend.test:9898/echo \
  --set canary.enabled=true \
  --set canary.istioIngress.enabled=true \
  --set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
  --set canary.istioIngress.host=frontend.istio.example.com
```

To install the chart as `backend`:

```console
helm upgrade -i backend flagger/podinfo \
  --namespace test \
  --set nameOverride=backend \
  --set canary.enabled=true
```

## Uninstalling the Chart

To uninstall/delete the `frontend` deployment:

```console
$ helm delete --purge frontend
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the podinfo chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/podinfo`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`hpa.enabled` | enables HPA | `true`
`hpa.cpu` | target CPU usage per pod | `80`
`hpa.memory` | target memory usage per pod | `512Mi`
`hpa.minReplicas` | minimum pod replicas | `2`
`hpa.maxReplicas` | maximum pod replicas | `4`
`resources.requests/cpu` | pod CPU request | `1m`
`resources.requests/memory` | pod memory request | `16Mi`
`backend` | backend URL | None
`faults.delay` | random HTTP response delays between 0 and 5 seconds | `false`
`faults.error` | 1 in 3 chance of a random HTTP response error | `false`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
$ helm install flagger/podinfo --name frontend \
  --set=image.tag=1.4.1,hpa.enabled=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install flagger/podinfo --name frontend -f values.yaml
```
1
charts/podinfo/templates/NOTES.txt
Normal file
@@ -0,0 +1 @@
podinfo {{ .Release.Name }} deployed!
43
charts/podinfo/templates/_helpers.tpl
Normal file
@@ -0,0 +1,43 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "podinfo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "podinfo.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "podinfo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name suffix.
*/}}
{{- define "podinfo.suffix" -}}
{{- if .Values.canary.enabled -}}
{{- "-primary" -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
54
charts/podinfo/templates/canary.yaml
Normal file
@@ -0,0 +1,54 @@
{{- if .Values.canary.enabled }}
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  progressDeadlineSeconds: 60
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: {{ template "podinfo.fullname" . }}
  service:
    port: {{ .Values.service.port }}
    {{- if .Values.canary.istioIngress.enabled }}
    gateways:
    - {{ .Values.canary.istioIngress.gateway }}
    hosts:
    - {{ .Values.canary.istioIngress.host }}
    {{- end }}
  canaryAnalysis:
    interval: {{ .Values.canary.analysis.interval }}
    threshold: {{ .Values.canary.analysis.threshold }}
    maxWeight: {{ .Values.canary.analysis.maxWeight }}
    stepWeight: {{ .Values.canary.analysis.stepWeight }}
    metrics:
    - name: istio_requests_total
      threshold: {{ .Values.canary.thresholds.successRate }}
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      threshold: {{ .Values.canary.thresholds.latency }}
      interval: 1m
    {{- if .Values.canary.loadtest.enabled }}
    webhooks:
    - name: load-test-get
      url: {{ .Values.canary.loadtest.url }}
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 5 -c 2 http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}"
    - name: load-test-post
      url: {{ .Values.canary.loadtest.url }}
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 5 -c 2 -m POST -d '{\"test\": true}' http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}/echo"
    {{- end }}
{{- end }}
15
charts/podinfo/templates/configmap.yaml
Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
  config.yaml: |-
    # http settings
    http-client-timeout: 1m
    http-server-timeout: {{ .Values.httpServer.timeout }}
    http-server-shutdown-timeout: 5s
93
charts/podinfo/templates/deployment.yaml
Normal file
@@ -0,0 +1,93 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app: {{ template "podinfo.fullname" . }}
  template:
    metadata:
      labels:
        app: {{ template "podinfo.fullname" . }}
      annotations:
        prometheus.io/scrape: 'true'
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - ./podinfo
            - --port={{ .Values.service.port }}
            - --level={{ .Values.logLevel }}
            - --random-delay={{ .Values.faults.delay }}
            - --random-error={{ .Values.faults.error }}
            - --config-path=/podinfo/config
          env:
          {{- if .Values.message }}
            - name: PODINFO_UI_MESSAGE
              value: {{ .Values.message }}
          {{- end }}
          {{- if .Values.backend }}
            - name: PODINFO_BACKEND_URL
              value: {{ .Values.backend }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          volumeMounts:
            - name: data
              mountPath: /data
            - name: config
              mountPath: /podinfo/config
              readOnly: true
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      volumes:
        - name: data
          emptyDir: {}
        - name: config
          configMap:
            name: {{ template "podinfo.fullname" . }}
37
charts/podinfo/templates/hpa.yaml
Normal file
@@ -0,0 +1,37 @@
{{- if .Values.hpa.enabled -}}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1beta2
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  minReplicas: {{ .Values.hpa.minReplicas }}
  maxReplicas: {{ .Values.hpa.maxReplicas }}
  metrics:
  {{- if .Values.hpa.cpu }}
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: {{ .Values.hpa.cpu }}
  {{- end }}
  {{- if .Values.hpa.memory }}
  - type: Resource
    resource:
      name: memory
      targetAverageValue: {{ .Values.hpa.memory }}
  {{- end }}
  {{- if .Values.hpa.requests }}
  - type: Pod
    pods:
      metricName: http_requests
      targetAverageValue: {{ .Values.hpa.requests }}
  {{- end }}
{{- end }}
20
charts/podinfo/templates/service.yaml
Normal file
@@ -0,0 +1,20 @@
{{- if not .Values.canary.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "podinfo.fullname" . }}
{{- end }}
22
charts/podinfo/templates/tests/test-config.yaml
Executable file
@@ -0,0 +1,22 @@
{{- $url := printf "%s%s.%s:%v" (include "podinfo.fullname" .) (include "podinfo.suffix" .) .Release.Namespace .Values.service.port -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}-tests
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
data:
  run.sh: |-
    @test "HTTP POST /echo" {
      run curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/echo
      [ $output = "test" ]
    }
    @test "HTTP POST /store" {
      curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/store
    }
    @test "HTTP GET /" {
      curl --retry 3 --connect-timeout 2 -sS {{ $url }} | grep hostname
    }
43
charts/podinfo/templates/tests/test-pod.yaml
Executable file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "podinfo.fullname" . }}-tests-{{ randAlphaNum 5 | lower }}
  annotations:
    "helm.sh/hook": test-success
    sidecar.istio.io/inject: "false"
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
spec:
  initContainers:
    - name: "test-framework"
      image: "dduportal/bats:0.4.0"
      command:
        - "bash"
        - "-c"
        - |
          set -ex
          # copy bats to tools dir
          cp -R /usr/local/libexec/ /tools/bats/
      volumeMounts:
        - mountPath: /tools
          name: tools
  containers:
    - name: {{ .Release.Name }}-ui-test
      image: dduportal/bats:0.4.0
      command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
      volumeMounts:
        - mountPath: /tests
          name: tests
          readOnly: true
        - mountPath: /tools
          name: tools
  volumes:
    - name: tests
      configMap:
        name: {{ template "podinfo.fullname" . }}-tests
    - name: tools
      emptyDir: {}
  restartPolicy: Never
73
charts/podinfo/values.yaml
Normal file
@@ -0,0 +1,73 @@
# Default values for podinfo.
image:
  repository: quay.io/stefanprodan/podinfo
  tag: 1.4.0
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 9898

hpa:
  enabled: true
  minReplicas: 2
  maxReplicas: 2
  cpu: 80
  memory: 512Mi

canary:
  enabled: true
  istioIngress:
    enabled: false
    # Istio ingress gateway name
    gateway: public-gateway.istio-system.svc.cluster.local
    # external host name eg. podinfo.example.com
    host:
  analysis:
    # schedule interval (default 60s)
    interval: 15s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
  thresholds:
    # minimum req success rate (non 5xx responses)
    # percentage (0-100)
    successRate: 99
    # maximum req duration P99
    # milliseconds
    latency: 500
  loadtest:
    enabled: false
    # load tester address
    url: http://flagger-loadtester.test/

resources:
  limits:
  requests:
    cpu: 100m
    memory: 32Mi

nodeSelector: {}

tolerations: []

affinity: {}

nameOverride: ""
fullnameOverride: ""

logLevel: info
backend: #http://backend-podinfo:9898/echo
message: #UI greetings

faults:
  delay: false
  error: false

httpServer:
  timeout: 30s
BIN
docs/diagrams/flagger-flux-gitops.png
Normal file
After Width: | Height: | Size: 130 KiB
BIN
docs/diagrams/flagger-gke-istio.png
Normal file
After Width: | Height: | Size: 196 KiB
BIN
docs/diagrams/istio-cert-manager-gke.png
Normal file
After Width: | Height: | Size: 205 KiB
@@ -6,16 +6,16 @@ description: Flagger is an Istio progressive delivery Kubernetes operator

[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack**.

Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
Flagger can be configured with Kubernetes custom resources and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
@@ -5,9 +5,8 @@

## Install

* [Install Flagger](install/install-flagger.md)
* [Install Grafana](install/install-grafana.md)
* [Install Istio](install/install-istio.md)
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
* [Flagger Install on Google Cloud](install/flagger-install-on-google-cloud.md)

## Usage

@@ -15,3 +14,6 @@
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

## Tutorials

* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
@@ -39,6 +39,22 @@ spec:
    # Istio virtual service host names (optional)
    hosts:
    - podinfo.example.com
    # HTTP match conditions (optional)
    match:
    - uri:
        prefix: /
    # HTTP rewrite (optional)
    rewrite:
      uri: /
    # timeout for HTTP requests (optional)
    timeout: 5s
    # retry policy when a HTTP request fails (optional)
    retries:
      attempts: 3
      perTryTimeout: 3s
  # promote the canary without analysing it (default false)
  skipAnalysis: false
  # define the canary analysis timing and KPIs
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
@@ -93,7 +109,124 @@ spec:

The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
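As a minimal sketch of such a target (the names and labels are illustrative; only the image and port are taken from the podinfo chart defaults), a deployment matching a Canary with `service.port: 9898` would expose that port on its container:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
spec:
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfo
        image: quay.io/stefanprodan/podinfo:1.4.0
        ports:
        - name: http
          containerPort: 9898 # must match the Canary service.port value
          protocol: TCP
```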
### Canary Deployment
### Virtual Service

Flagger creates an Istio Virtual Service based on the Canary service spec.

The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: frontend
  namespace: test
service:
  # container port
  port: 9898
  # Istio gateways (optional)
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  # Istio virtual service host names (optional)
  hosts:
  - frontend.example.com
  # Istio virtual service HTTP match conditions (optional)
  match:
  - uri:
      prefix: /
  # Istio virtual service HTTP rewrite (optional)
  rewrite:
    uri: /
```

For the above spec Flagger will generate the following virtual service:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: frontend
  namespace: test
  ownerReferences:
  - apiVersion: flagger.app/v1alpha3
    blockOwnerDeletion: true
    controller: true
    kind: Canary
    name: podinfo
    uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
spec:
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - frontend.example.com
  - frontend
  http:
  - match:
    - uri:
        prefix: /
    rewrite:
      uri: /
    route:
    - destination:
        host: frontend-primary
        port:
          number: 9898
      weight: 100
    - destination:
        host: frontend-canary
        port:
          number: 9898
      weight: 0
```

Flagger keeps the virtual service in sync with the canary service spec. Any direct modification of the virtual
service spec will be overwritten.

To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
the service spec can contain only the container port:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: backend
  namespace: test
spec:
  service:
    port: 9898
```

Based on the above spec, Flagger will create several ClusterIP services like:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: backend-primary
  ownerReferences:
  - apiVersion: flagger.app/v1alpha3
    blockOwnerDeletion: true
    controller: true
    kind: Canary
    name: backend
    uid: 2ca1a9c7-2ef6-11e9-bd01-42010a9c0145
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: 9898
  selector:
    app: backend-primary
```

Flagger works for user-facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.

### Canary Stages
@@ -152,6 +285,9 @@ Spec:
    # canary increment step
    # percentage (0-100)
    stepWeight: 2
    # deploy straight to production without
    # the metrics and webhook checks
    skipAnalysis: false
```

The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.

@@ -167,6 +303,11 @@ And the time it takes for a canary to be rollback when the metrics or webhook ch
interval * threshold
```
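As a quick worked example (assuming the sample values used elsewhere in these docs, `interval: 1m` and `threshold: 10` — the threshold value here is an assumption, not quoted from the spec above):

```text
rollback time = interval * threshold = 1m * 10 = 10 minutes
```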
In emergency cases, you may want to skip the analysis phase and ship changes directly to production.
At any time you can set `spec.skipAnalysis: true`.
When skip analysis is enabled, Flagger checks if the canary deployment is healthy and
promotes it without analysing it. If an analysis is underway, Flagger cancels it and runs the promotion.
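A minimal sketch of toggling this on an existing Canary (the resource name and namespace are illustrative):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # skip the metric checks and webhooks; promote as soon as the canary is healthy
  skipAnalysis: true
```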
### HTTP Metrics

The canary analysis uses the following Prometheus queries:
@@ -242,6 +383,49 @@

> **Note** that the metric interval should be lower than or equal to the control loop interval.

### Custom Metrics

The canary analysis can be extended with custom Prometheus queries.

```yaml
canaryAnalysis:
  threshold: 1
  maxWeight: 50
  stepWeight: 5
  metrics:
  - name: "404s percentage"
    threshold: 5
    query: |
      100 - sum(
          rate(
              istio_requests_total{
                  reporter="destination",
                  destination_workload_namespace="test",
                  destination_workload="podinfo",
                  response_code!="404"
              }[1m]
          )
      )
      /
      sum(
          rate(
              istio_requests_total{
                  reporter="destination",
                  destination_workload_namespace="test",
                  destination_workload="podinfo"
              }[1m]
          )
      ) * 100
```

The above configuration validates the canary by checking
if the HTTP 404 req/sec percentage is below 5 percent of the total traffic.
If the 404s rate reaches the 5% threshold, then the canary fails.

When specifying a query, Flagger will run the PromQL query and convert the result to float64.
Then it compares the query result value with the metric threshold value.
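As another illustrative sketch (this metric is not part of the docs; the name and query simply follow the same pattern as the 404s example above), a check that fails the canary when 5xx responses exceed 1% of traffic could look like:

```yaml
canaryAnalysis:
  metrics:
  - name: "5xx percentage"
    threshold: 1
    query: |
      sum(
          rate(
              istio_requests_total{
                  reporter="destination",
                  destination_workload_namespace="test",
                  destination_workload="podinfo",
                  response_code=~"5.*"
              }[1m]
          )
      )
      /
      sum(
          rate(
              istio_requests_total{
                  reporter="destination",
                  destination_workload_namespace="test",
                  destination_workload="podinfo"
              }[1m]
          )
      ) * 100
```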
### Webhooks

The canary analysis can be extended with webhooks.
@@ -252,14 +436,14 @@ Spec:

```yaml
canaryAnalysis:
  webhooks:
  - name: integration-tests
    url: http://podinfo.test:9898/echo
  - name: integration-test
    url: http://int-runner.test:8080/
    timeout: 30s
    metadata:
      test: "all"
      token: "16688eb5e9f289f1991c"
  - name: load-tests
    url: http://podinfo.test:9898/echo
  - name: db-test
    url: http://migration-check.db/query
    timeout: 30s
    metadata:
      key1: "val1"
@@ -1,16 +1,15 @@
# Install Istio
# Flagger install on Google Cloud

This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
Let’s Encrypt TLS for ingress gateway on Google Kubernetes Engine.
This guide walks you through setting up Flagger and Istio on Google Kubernetes Engine.

### Prerequisites

You will be creating a cluster on Google’s Kubernetes Engine \(GKE\);
if you don’t have an account you can sign up [here](https://cloud.google.com/free/) for free credits.

Login into GCP, create a project and enable billing for it.
Log in to Google Cloud, create a project and enable billing for it.

Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.

@@ -23,8 +22,8 @@ gcloud config set project PROJECT_ID

Set the default compute region and zone:

```text
gcloud config set compute/region europe-west3
gcloud config set compute/zone europe-west3-a
gcloud config set compute/region us-central1
gcloud config set compute/zone us-central1-a
```

Enable the Kubernetes and Cloud DNS services for your project:
@@ -34,46 +33,42 @@ gcloud services enable container.googleapis.com
gcloud services enable dns.googleapis.com
```

Install the `kubectl` command-line tool:
Install the kubectl command-line tool:

```text
gcloud components install kubectl
```

Install the `helm` command-line tool:

```text
brew install kubernetes-helm
```

### GKE cluster setup

Create a cluster with three nodes using the latest Kubernetes version:
Create a cluster with the Istio add-on:

```bash
k8s_version=$(gcloud container get-server-config --format=json \
  | jq -r '.validNodeVersions[0]')
K8S_VERSION=$(gcloud container get-server-config --format=json \
  | jq -r '.validMasterVersions[0]')

gcloud container clusters create istio \
  --cluster-version=${k8s_version} \
  --zone=europe-west3-a \
  --num-nodes=3 \
gcloud beta container clusters create istio \
  --cluster-version=${K8S_VERSION} \
  --zone=us-central1-a \
  --num-nodes=2 \
  --machine-type=n1-highcpu-4 \
  --preemptible \
  --no-enable-cloud-logging \
  --no-enable-cloud-monitoring \
  --disk-size=30 \
  --enable-autorepair \
  --scopes=gke-default,compute-rw,storage-rw
  --addons=HorizontalPodAutoscaling,Istio \
  --istio-config=auth=MTLS_PERMISSIVE
```

The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
The above command will create a default node pool consisting of two `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
after a maximum of 24 hours.

Set up credentials for `kubectl`:

```bash
gcloud container clusters get-credentials istio -z=europe-west3-a
gcloud container clusters get-credentials istio
```

Create a cluster admin role binding:
@@ -87,9 +82,11 @@ kubectl create clusterrolebinding "cluster-admin-$(whoami)" \

Validate your setup with:

```bash
kubectl get nodes -o wide
kubectl -n istio-system get svc
```

In a couple of seconds GCP should allocate an external IP to the `istio-ingressgateway` service.

### Cloud DNS setup

You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
@@ -116,34 +113,30 @@ Wait for the name servers to change \(replace `example.com` with your domain\):
watch dig +short NS example.com
```

Create a static IP address named `istio-gateway-ip` in the same region as your GKE cluster:
Create a static IP address named `istio-gateway` using the Istio ingress IP:

```bash
gcloud compute addresses create istio-gateway-ip --region europe-west3
export GATEWAY_IP=$(kubectl -n istio-system get svc/istio-ingressgateway -ojson \
  | jq -r .status.loadBalancer.ingress[0].ip)

gcloud compute addresses create istio-gateway --addresses ${GATEWAY_IP} --region us-central1
```

Find the static IP address:

```bash
gcloud compute addresses describe istio-gateway-ip --region europe-west3
```

Create the following DNS records \(replace `example.com` with your domain and set your Istio Gateway IP\):
Create the following DNS records \(replace `example.com` with your domain\):

```bash
DOMAIN="example.com"
GATEWAYIP="35.198.98.90"

gcloud dns record-sets transaction start --zone=istio

gcloud dns record-sets transaction add --zone=istio \
  --name="${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
  --name="${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}

gcloud dns record-sets transaction add --zone=istio \
  --name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
  --name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}

gcloud dns record-sets transaction add --zone=istio \
  --name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
  --name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}

gcloud dns record-sets transaction execute --zone istio
```
@@ -154,31 +147,22 @@ Verify that the wildcard DNS is working \(replace `example.com` with your domain
watch host test.example.com
```

### Install Istio with Helm
### Install Helm

Download the latest Istio release:
Install the [Helm](https://docs.helm.sh/using_helm/#installing-helm) command-line tool:

```bash
curl -L https://git.io/getLatestIstio | sh -
```

Navigate to `istio-x.x.x` dir and copy the Istio CLI in your bin:

```bash
cd istio-x.x.x/
sudo cp ./bin/istioctl /usr/local/bin/istioctl
```

Apply the Istio CRDs:

```bash
kubectl apply -f ./install/kubernetes/helm/istio/templates/crds.yaml
```text
brew install kubernetes-helm
```

Create a service account and a cluster role binding for Tiller:

```bash
kubectl apply -f ./install/kubernetes/helm/helm-service-account.yaml
kubectl -n kube-system create sa tiller

kubectl create clusterrolebinding tiller-cluster-rule \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:tiller
```

Deploy Tiller in the `kube-system` namespace:
@@ -187,125 +171,51 @@ Deploy Tiller in the `kube-system` namespace:
helm init --service-account tiller
```

Find the GKE IP ranges:
You should consider using SSL between Helm and Tiller; for more information on securing your Helm
installation see [docs.helm.sh](https://docs.helm.sh/using_helm/#securing-your-helm-installation).

### Install cert-manager

Jetstack's [cert-manager](https://github.com/jetstack/cert-manager)
is a Kubernetes operator that automatically creates and manages TLS certs issued by Let’s Encrypt.

You'll be using cert-manager to provision a wildcard certificate for the Istio ingress gateway.

Install cert-manager's CRDs:

```bash
gcloud container clusters describe istio --zone=europe-west3-a \
  | grep -e clusterIpv4Cidr -e servicesIpv4Cidr
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager

kubectl apply -f ${CERT_REPO}/release-0.6/deploy/manifests/00-crds.yaml
```

You'll be using the IP ranges to allow unrestricted egress traffic for services running inside the service mesh.

Configure Istio with Prometheus, Jaeger, and cert-manager:

```yaml
global:
  nodePort: false
  proxy:
    # replace with your GKE IP ranges
    includeIPRanges: "10.28.0.0/14,10.7.240.0/20"

sidecarInjectorWebhook:
  enabled: true
  enableNamespacesByDefault: false

gateways:
  enabled: true
  istio-ingressgateway:
    replicaCount: 2
    autoscaleMin: 2
    autoscaleMax: 3
    # replace with your Istio Gateway IP
    loadBalancerIP: "35.198.98.90"
    type: LoadBalancer

pilot:
  enabled: true
  replicaCount: 1
  autoscaleMin: 1
  autoscaleMax: 1
  resources:
    requests:
      cpu: 500m
      memory: 1024Mi

grafana:
  enabled: true
  security:
    enabled: true
    adminUser: admin
    # change the password
    adminPassword: admin

prometheus:
  enabled: true

servicegraph:
  enabled: true

tracing:
  enabled: true
  jaeger:
    tag: 1.7

certmanager:
  enabled: true
```

Save the above file as `my-istio.yaml` and install Istio with Helm:
Create the cert-manager namespace and disable resource validation:

```bash
helm upgrade --install istio ./install/kubernetes/helm/istio \
  --namespace=istio-system \
  -f ./my-istio.yaml
kubectl create namespace cert-manager

kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
```

Verify that Istio workloads are running:
Install cert-manager with Helm:

```text
kubectl -n istio-system get pods
```bash
helm repo update && helm upgrade -i cert-manager \
  --namespace cert-manager \
  --version v0.6.0 \
  stable/cert-manager
```

### Configure Istio Gateway with LE TLS
### Istio Gateway TLS setup

Create an Istio Gateway in istio-system namespace with HTTPS redirect:
Create a generic Istio Gateway to expose services outside the mesh on HTTPS:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*"
      tls:
        mode: SIMPLE
        privateKey: /etc/istio/ingressgateway-certs/tls.key
        serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

Save the above resource as istio-gateway.yaml and then apply it:

```text
kubectl apply -f ./istio-gateway.yaml
kubectl apply -f ${REPO}/artifacts/gke/istio-gateway.yaml
```

Create a service account with Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):
@@ -387,37 +297,76 @@ spec:
    - "example.com"
```

Save the above resource as of-cert.yaml and then apply it:
Save the above resource as istio-gateway-cert.yaml and then apply it:

```text
kubectl apply -f ./of-cert.yaml
kubectl apply -f ./istio-gateway-cert.yaml
```

In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:

```text
kubectl -n istio-system logs deployment/certmanager -f
kubectl -n istio-system describe certificate istio-gateway

Certificate issued successfully
Certificate istio-system/istio-gateway scheduled for renewal in 1438 hours
Events:
  Type    Reason      Age    From          Message
  ----    ------      ----   ----          -------
  Normal  CertIssued  1m52s  cert-manager  Certificate issued successfully
```

Recreate Istio ingress gateway pods:

```bash
kubectl -n istio-system delete pods -l istio=ingressgateway
kubectl -n istio-system get pods -l istio=ingressgateway
```

Note that Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal.
Since the GKE cluster is made out of preemptible VMs the gateway pods will be replaced once every 24h;
if you're not using preemptible nodes then you need to manually kill the gateway pods every two months
if you're not using preemptible nodes then you need to manually delete the gateway pods every two months
before the certificate expires.

### Expose services outside the service mesh
### Install Prometheus

In order to expose services via the Istio Gateway you have to create a Virtual Service attached to Istio Gateway.
The GKE Istio add-on does not include a Prometheus instance that scrapes the Istio telemetry service.
Because Flagger uses the Istio HTTP metrics to run the canary analysis, you have to deploy the following
Prometheus configuration that's similar to the one that comes with the official Istio Helm chart.

Create a virtual service in `istio-system` namespace for Grafana \(replace `example.com` with your domain\):
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl apply -f ${REPO}/artifacts/gke/istio-prometheus.yaml
```

### Install Flagger and Grafana

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Flagger in the `istio-system` namespace with Slack notifications enabled:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system:9090 \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```

Deploy Grafana in the `istio-system` namespace:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus.istio-system:9090 \
  --set user=admin \
  --set password=replace-me
```

Expose Grafana through the public gateway by creating a virtual service \(replace `example.com` with your domain\):

```yaml
apiVersion: networking.istio.io/v1alpha3
@@ -433,8 +382,7 @@ spec:
  http:
  - route:
    - destination:
        host: grafana
      timeout: 30s
        host: flagger-grafana
```

Save the above resource as grafana-virtual-service.yaml and then apply it:
@@ -444,17 +392,3 @@ kubectl apply -f ./grafana-virtual-service.yaml
```

Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.

Check that HTTP2 is enabled:

```bash
curl -I --http2 https://grafana.example.com

HTTP/2 200
content-type: text/html; charset=UTF-8
x-envoy-upstream-service-time: 3
server: envoy
```
143
docs/gitbook/install/flagger-install-on-kubernetes.md
Normal file
@@ -0,0 +1,143 @@
# Flagger install on Kubernetes

This guide walks you through setting up Flagger on a Kubernetes cluster.

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:

* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook

Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.

A minimal Istio installation should contain the following services:

* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus

### Install Flagger

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system:9090
```

You can install Flagger in any namespace as long as it can talk to the Istio Prometheus service on port 9090.

Enable **Slack** notifications:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```
If you don't have Tiller you can use the helm template command and apply the generated yaml with kubectl:
|
||||
|
||||
```bash
|
||||
# generate
|
||||
helm template flagger/flagger \
|
||||
--name flagger \
|
||||
--namespace=istio-system \
|
||||
--set metricsServer=http://prometheus.istio-system:9090 \
|
||||
> $HOME/flagger.yaml
|
||||
|
||||
# apply
|
||||
kubectl apply -f $HOME/flagger.yaml
|
||||
```
|
||||
|
||||
To uninstall the Flagger release with Helm run:
|
||||
|
||||
```text
|
||||
helm delete --purge flagger
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
> **Note** that on uninstall the Canary CRD will not be removed.
|
||||
Deleting the CRD will make Kubernetes remove all the objects owned by Flagger like Istio virtual services,
|
||||
Kubernetes deployments and ClusterIP services.
|
||||
|
||||
If you want to remove all the objects created by Flagger you have delete the Canary CRD with kubectl:
|
||||
|
||||
```text
|
||||
kubectl delete crd canaries.flagger.app
|
||||
```
|
||||
|
||||
### Install Grafana

Flagger comes with a Grafana dashboard made for monitoring the canary analysis.

Deploy Grafana in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090 \
--set user=admin \
--set password=change-me
```

Or use the helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/grafana \
--name flagger-grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090 \
--set user=admin \
--set password=change-me \
> $HOME/flagger-grafana.yaml

# apply
kubectl apply -f $HOME/flagger-grafana.yaml
```

You can access Grafana using port forwarding:

```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:3000
```

### Install Load Tester

Flagger comes with an optional load testing service that generates traffic
during canary analysis when configured as a webhook.

Deploy the load test runner with Helm:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```

Deploy with kubectl:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```

> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.
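Once deployed, the load tester is wired into a canary analysis as a webhook. A minimal sketch of the relevant `canaryAnalysis` fragment, assuming the `test` namespace used above and a `podinfo` target (the same shape appears in full in the e2e test script later in this diff):

```bash
# print the webhook fragment; merge it into your Canary definition by hand
cat <<'EOF'
  canaryAnalysis:
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
EOF
```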
@@ -1,75 +0,0 @@

(Removed file: the old "Install Flagger" page, superseded by the new flagger-install-on-kubernetes.md above. The only content not carried over: an intro pointing new Istio users at the [Istio service mesh walk-through](https://docs.flagger.app/install/install-istio), an explicit prerequisites list (Kubernetes >= 1.11, Istio >= 1.0, Prometheus >= 2.6), and a `--set controlLoopInterval=1m` flag in the helm template example.)
@@ -1,48 +0,0 @@

(Removed file: the old "Install Grafana" page, superseded by the Grafana section of flagger-install-on-kubernetes.md above; the old page defaulted the Prometheus URL to `http://prometheus:9090` and the password to `admin`.)
353 docs/gitbook/tutorials/canary-helm-gitops.md (new file)
# Canary Deployments with Helm Charts and GitOps

This guide shows you how to package a web app into a Helm chart, trigger canary deployments on Helm upgrade
and automate the chart release process with Weave Flux.

### Packaging

You'll be using the [podinfo](https://github.com/stefanprodan/k8s-podinfo) chart.
This chart packages a web app made with Go, its configuration, a horizontal pod autoscaler (HPA)
and the canary configuration file.

```
├── Chart.yaml
├── README.md
├── templates
│   ├── NOTES.txt
│   ├── _helpers.tpl
│   ├── canary.yaml
│   ├── configmap.yaml
│   ├── deployment.yaml
│   └── hpa.yaml
└── values.yaml
```

You can find the chart source [here](https://github.com/stefanprodan/flagger/tree/master/charts/podinfo).

### Install

Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Install podinfo with the release name `frontend` (replace `example.com` with your own domain):

```bash
helm upgrade -i frontend flagger/podinfo \
--namespace test \
--set nameOverride=frontend \
--set backend=http://backend.test:9898/echo \
--set canary.enabled=true \
--set canary.istioIngress.enabled=true \
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
--set canary.istioIngress.host=frontend.istio.example.com
```

Flagger takes a Kubernetes deployment and a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio virtual services).
These objects expose the application on the mesh and drive the canary analysis and promotion.

```bash
# generated by Helm
configmap/frontend
deployment.apps/frontend
horizontalpodautoscaler.autoscaling/frontend
canary.flagger.app/frontend

# generated by Flagger
configmap/frontend-primary
deployment.apps/frontend-primary
horizontalpodautoscaler.autoscaling/frontend-primary
service/frontend
service/frontend-canary
service/frontend-primary
virtualservice.networking.istio.io/frontend
```

When the `frontend-primary` deployment comes online,
Flagger will route all traffic to the primary pods and scale the `frontend` deployment to zero.

Open your browser and navigate to the frontend URL:

![frontend UI](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend.png)

Now let's install the `backend` release without exposing it outside the mesh:

```bash
helm upgrade -i backend flagger/podinfo \
--namespace test \
--set nameOverride=backend \
--set canary.enabled=true \
--set canary.istioIngress.enabled=false
```

Check if Flagger has successfully deployed the canaries:

```
kubectl -n test get canaries

NAME       STATUS        WEIGHT   LASTTRANSITIONTIME
backend    Initialized   0        2019-02-12T18:53:18Z
frontend   Initialized   0        2019-02-12T17:50:50Z
```

Click on the ping button in the `frontend` UI to trigger an HTTP POST request
that will reach the `backend` app:

![request trace](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-jaeger.png)

We'll use the `/echo` endpoint (same as the one the ping button calls)
to generate load on both apps during a canary deployment.
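Once the load tester from the next section is running, the same request the ping button makes can be issued by hand from inside the mesh (the pod name suffix is hypothetical):

```bash
# open a shell inside the load tester pod, then call the echo endpoint
kubectl -n test exec -it flagger-loadtester-xxx-yyy sh

curl -d 'ping' http://frontend.test:9898/echo
```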
### Upgrade

First let's install a load testing service that will generate traffic during analysis:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```

Enable the load tester and deploy a new `frontend` version:

```bash
helm upgrade -i frontend flagger/podinfo \
--namespace test \
--reuse-values \
--set canary.loadtest.enabled=true \
--set image.tag=1.4.1
```

Flagger detects that the deployment revision changed and starts the canary analysis along with the load test:

```
kubectl -n istio-system logs deployment/flagger -f | jq .msg

New revision detected! Scaling up frontend.test
Halt advancement frontend.test waiting for rollout to finish: 0 of 2 updated replicas are available
Starting canary analysis for frontend.test
Advance frontend.test canary weight 5
Advance frontend.test canary weight 10
Advance frontend.test canary weight 15
Advance frontend.test canary weight 20
Advance frontend.test canary weight 25
Advance frontend.test canary weight 30
Advance frontend.test canary weight 35
Advance frontend.test canary weight 40
Advance frontend.test canary weight 45
Advance frontend.test canary weight 50
Copying frontend.test template spec to frontend-primary.test
Halt advancement frontend-primary.test waiting for rollout to finish: 1 old replicas are pending termination
Promotion completed! Scaling down frontend.test
```

You can monitor the canary deployment with Grafana. Open the Flagger dashboard,
select `test` from the namespace dropdown, `frontend-primary` from the primary dropdown and `frontend` from the
canary dropdown.

![frontend canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-dashboard.png)

Now trigger a canary deployment for the `backend` app, but this time you'll change a value in the configmap:

```bash
helm upgrade -i backend flagger/podinfo \
--namespace test \
--reuse-values \
--set canary.loadtest.enabled=true \
--set httpServer.timeout=25s
```

Generate HTTP 500 errors:

```bash
kubectl -n test exec -it flagger-loadtester-xxx-yyy sh

watch curl http://backend-canary:9898/status/500
```

Generate latency:

```bash
kubectl -n test exec -it flagger-loadtester-xxx-yyy sh

watch curl http://backend-canary:9898/delay/1
```

Flagger detects the config map change and starts a canary analysis. Flagger will pause the advancement
when the HTTP success rate drops under 99% or when the average request duration in the last minute is over 500ms:

```
kubectl -n test describe canary backend

Events:

 ConfigMap backend has changed
 New revision detected! Scaling up backend.test
 Starting canary analysis for backend.test
 Advance backend.test canary weight 5
 Advance backend.test canary weight 10
 Advance backend.test canary weight 15
 Advance backend.test canary weight 20
 Advance backend.test canary weight 25
 Advance backend.test canary weight 30
 Advance backend.test canary weight 35
 Halt backend.test advancement success rate 62.50% < 99%
 Halt backend.test advancement success rate 88.24% < 99%
 Advance backend.test canary weight 40
 Advance backend.test canary weight 45
 Halt backend.test advancement request duration 2.415s > 500ms
 Halt backend.test advancement request duration 2.42s > 500ms
 Advance backend.test canary weight 50
 ConfigMap backend-primary synced
 Copying backend.test template spec to backend-primary.test
 Promotion completed! Scaling down backend.test
```

![backend canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-backend-dashboard.png)

If the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```bash
kubectl -n test get canary

NAME       STATUS      WEIGHT   LASTTRANSITIONTIME
backend    Succeeded   0        2019-02-12T19:33:11Z
frontend   Failed      0        2019-02-12T19:47:20Z
```

If you've enabled the Slack notifications, you'll receive an alert with the reason why the promotion failed.
### GitOps automation

Instead of using the Helm CLI from a CI tool to perform the install and upgrade,
you could use a Git based approach. GitOps is a way to do Continuous Delivery:
it works by using Git as the source of truth for declarative infrastructure and workloads.
In the [GitOps model](https://www.weave.works/technologies/gitops/),
any change to production must be committed in source control
prior to being applied on the cluster. This way rollback and audit logs are provided by Git.

*(diagram: the GitOps canary deployment pipeline)*

In order to apply the GitOps pipeline model to Flagger canary deployments you'll need
a Git repository with your workloads definitions in YAML format,
a container registry where your CI system pushes immutable images and
an operator that synchronizes the Git repo with the cluster state.

Create a git repository with the following content:

```
├── namespaces
│   └── test.yaml
└── releases
    └── test
        ├── backend.yaml
        ├── frontend.yaml
        └── loadtester.yaml
```

You can find the git source [here](https://github.com/stefanprodan/flagger/tree/master/artifacts/cluster).

Define the `frontend` release using the Flux `HelmRelease` custom resource:

```yaml
apiVersion: flux.weave.works/v1beta1
kind: HelmRelease
metadata:
  name: frontend
  namespace: test
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: semver:~1.4
spec:
  releaseName: frontend
  chart:
    repository: https://stefanprodan.github.io/flagger/
    name: podinfo
    version: 2.0.0
  values:
    image:
      repository: quay.io/stefanprodan/podinfo
      tag: 1.4.0
    backend: http://backend-podinfo:9898/echo
    canary:
      enabled: true
      istioIngress:
        enabled: true
        gateway: public-gateway.istio-system.svc.cluster.local
        host: frontend.istio.example.com
      loadtest:
        enabled: true
```

In the `chart` section I've defined the release source by specifying the Helm repository (hosted on GitHub Pages), chart name and version.
In the `values` section I've overwritten the defaults set in values.yaml.

With the `flux.weave.works` annotations I instruct Flux to automate this release.
When an image tag in the semver range of `1.4.0 - 1.4.99` is pushed to Quay,
Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.

Install [Weave Flux](https://github.com/weaveworks/flux) and its Helm Operator by specifying your Git repo URL:

```bash
helm repo add weaveworks https://weaveworks.github.io/flux

helm install --name flux \
--set helmOperator.create=true \
--set git.url=git@github.com:<USERNAME>/<REPOSITORY> \
--namespace flux \
weaveworks/flux
```

At startup Flux generates an SSH key and logs the public key. Find the SSH public key with:

```bash
kubectl -n flux logs deployment/flux | grep identity.pub | cut -d '"' -f2
```

In order to sync your cluster state with Git you need to copy the public key and create a
deploy key with write access on your GitHub repository.

Open GitHub, navigate to your fork, go to _Settings > Deploy keys_, click on _Add deploy key_,
check _Allow write access_, paste the Flux public key and click _Add key_.

After a couple of seconds Flux will apply the Kubernetes resources from Git and Flagger will
launch the `frontend` and `backend` apps.

A CI/CD pipeline for the `frontend` release could look like this:

* cut a release from the master branch of the podinfo code repo with the git tag `1.4.1`
* CI builds the image and pushes the `podinfo:1.4.1` image to the container registry
* Flux scans the registry and updates the Helm release `image.tag` to `1.4.1`
* Flux commits and pushes the change to the cluster repo
* Flux applies the updated Helm release on the cluster
* Flux Helm Operator picks up the change and calls Tiller to upgrade the release
* Flagger detects a revision change and scales up the `frontend` deployment
* Flagger starts the load test and runs the canary analysis
* based on the analysis result, the canary deployment is promoted to production or rolled back
* Flagger sends a Slack notification with the canary result

If the canary fails, fix the bug, do another patch release, e.g. `1.4.2`, and the whole process will run again.

A canary deployment can fail due to any of the following reasons (see the inspection commands after this list):

* the container image can't be downloaded
* the deployment replica set is stuck for more than ten minutes (e.g. due to a container crash loop)
* the webhooks (acceptance tests, load tests, etc.) are returning a non 2xx response
* the HTTP success rate (non 5xx responses) metric drops under the threshold
* the HTTP average duration metric goes over the threshold
* the Istio telemetry service is unable to collect traffic metrics
* the metrics server (Prometheus) can't be reached
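To see which of these conditions tripped for a given run, the canary events and the operator logs carry the reason (release names as installed above):

```bash
kubectl -n test describe canary/frontend
kubectl -n istio-system logs deployment/flagger | jq .msg
```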
If you want to find out more about managing Helm releases with Flux, here is an in-depth guide:
[github.com/stefanprodan/gitops-helm](https://github.com/stefanprodan/gitops-helm).
@@ -113,7 +113,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```

Flagger detects that the deployment revision changed and starts a new rollout:
BIN docs/screens/demo-backend-dashboard.png (new file, 523 KiB)
BIN docs/screens/demo-frontend-dashboard.png (new file, 349 KiB)
BIN docs/screens/demo-frontend-jaeger.png (new file, 497 KiB)
BIN docs/screens/demo-frontend.png (new file, 131 KiB)
BIN docs/screens/flagger-grafana-dashboard.png (new file, 523 KiB)
BIN (modified image: 442 KiB before, 440 KiB after)
@@ -17,6 +17,7 @@ limitations under the License.

package v1alpha3

import (
	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	hpav1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"time"

@@ -26,6 +27,7 @@ const (
	CanaryKind              = "Canary"
	ProgressDeadlineSeconds = 600
	AnalysisInterval        = 60 * time.Second
	MetricInterval          = "1m"
)

// +genclient

@@ -58,6 +60,10 @@ type CanarySpec struct {
	// the maximum time in seconds for a canary deployment to make progress
	// before it is considered to be failed. Defaults to ten minutes.
	ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`

	// promote the canary without analysing it
	// +optional
	SkipAnalysis bool `json:"skipAnalysis,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

@@ -103,9 +109,13 @@ type CanaryStatus struct {
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
type CanaryService struct {
	Port     int32                            `json:"port"`
	Gateways []string                         `json:"gateways"`
	Hosts    []string                         `json:"hosts"`
	Match    []istiov1alpha3.HTTPMatchRequest `json:"match,omitempty"`
	Rewrite  *istiov1alpha3.HTTPRewrite       `json:"rewrite,omitempty"`
	Timeout  string                           `json:"timeout,omitempty"`
	Retries  *istiov1alpha3.HTTPRetry         `json:"retries,omitempty"`
}
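A sketch of how these new fields surface in a Canary definition, with names taken from the json tags above (the values, and the `attempts`/`perTryTimeout` keys that come from the Istio `HTTPRetry` type, are illustrative):

```bash
# print a service fragment for a Canary custom resource
cat <<'EOF'
  service:
    port: 9898
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    hosts:
      - app.example.com
    timeout: 15s
    retries:
      attempts: 3
      perTryTimeout: 5s
EOF
```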
// CanaryAnalysis is used to describe how the analysis should be done

@@ -120,9 +130,11 @@ type CanaryAnalysis struct {

// CanaryMetric holds the reference to Istio metrics used for canary analysis
type CanaryMetric struct {
	Name      string  `json:"name"`
	Interval  string  `json:"interval,omitempty"`
	Threshold float64 `json:"threshold"`
	// +optional
	Query string `json:"query,omitempty"`
}
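The optional `query` field is what enables the custom metric checks: a sketch of a metric entry that halts the analysis when a PromQL scalar query goes over the threshold (the full "404s percentage" example lives in the e2e test script later in this diff):

```bash
# print a custom metric fragment for the canaryAnalysis section
cat <<'EOF'
    metrics:
      - name: "404s percentage"
        threshold: 5
        interval: 1m
        query: |
          # any PromQL expression that returns a single scalar value
EOF
```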
// CanaryWebhook holds the reference to external checks used for canary analysis

@@ -163,3 +175,8 @@ func (c *Canary) GetAnalysisInterval() time.Duration {

	return interval
}

// GetMetricInterval returns the metric interval default value (1m)
func (c *Canary) GetMetricInterval() string {
	return MetricInterval
}
@@ -21,6 +21,7 @@ limitations under the License.

package v1alpha3

import (
	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	v1 "k8s.io/api/autoscaling/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -143,6 +144,23 @@ func (in *CanaryService) DeepCopyInto(out *CanaryService) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Match != nil {
		in, out := &in.Match, &out.Match
		*out = make([]istiov1alpha3.HTTPMatchRequest, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Rewrite != nil {
		in, out := &in.Rewrite, &out.Rewrite
		*out = new(istiov1alpha3.HTTPRewrite)
		**out = **in
	}
	if in.Retries != nil {
		in, out := &in.Retries, &out.Retries
		*out = new(istiov1alpha3.HTTPRetry)
		**out = **in
	}
	return
}
|
||||
return err
|
||||
}
|
||||
|
||||
if appSel, ok := canaryDep.Spec.Selector.MatchLabels["app"]; !ok || appSel != canaryDep.Name {
|
||||
return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
|
||||
targetName, cd.Namespace, targetName)
|
||||
}
|
||||
|
||||
primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
// create primary secrets and config maps
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

@@ -73,6 +74,38 @@ func (c *CanaryObserver) queryMetric(query string) (*vectorQueryResponse, error)
	return &values, nil
}

// GetScalar runs the promql query and returns the first value found
func (c *CanaryObserver) GetScalar(query string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	query = strings.Replace(query, "\n", "", -1)
	query = strings.Replace(query, " ", "", -1)

	var value *float64
	result, err := c.queryMetric(query)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			value = &f
		}
	}
	if value == nil {
		return 0, fmt.Errorf("no values found for query %s", query)
	}
	return *value, nil
}

// GetDeploymentCounter returns the requests success rate using istio_requests_total metric
func (c *CanaryObserver) GetDeploymentCounter(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
@@ -2,7 +2,8 @@ package controller

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	istioclientset "github.com/knative/pkg/client/clientset/versioned"
	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"

@@ -26,15 +27,14 @@ type CanaryRouter struct {
	logger *zap.SugaredLogger
}

// Sync creates or updates the primary and canary ClusterIP services
// and the Istio virtual service.
func (c *CanaryRouter) Sync(cd *flaggerv1.Canary) error {
	err := c.createServices(cd)
	if err != nil {
		return err
	}
	err = c.syncVirtualService(cd)
	if err != nil {
		return err
	}
@@ -163,14 +163,48 @@ func (c *CanaryRouter) createServices(cd *flaggerv1.Canary) error {
	return nil
}

func (c *CanaryRouter) syncVirtualService(cd *flaggerv1.Canary) error {
	targetName := cd.Spec.TargetRef.Name
	primaryName := fmt.Sprintf("%s-primary", targetName)
	hosts := append(cd.Spec.Service.Hosts, targetName)
	gateways := append(cd.Spec.Service.Gateways, "mesh")
	route := []istiov1alpha3.DestinationWeight{
		{
			Destination: istiov1alpha3.Destination{
				Host: primaryName,
				Port: istiov1alpha3.PortSelector{
					Number: uint32(cd.Spec.Service.Port),
				},
			},
			Weight: 100,
		},
		{
			Destination: istiov1alpha3.Destination{
				Host: fmt.Sprintf("%s-canary", targetName),
				Port: istiov1alpha3.PortSelector{
					Number: uint32(cd.Spec.Service.Port),
				},
			},
			Weight: 0,
		},
	}
	newSpec := istiov1alpha3.VirtualServiceSpec{
		Hosts:    hosts,
		Gateways: gateways,
		Http: []istiov1alpha3.HTTPRoute{
			{
				Match:   cd.Spec.Service.Match,
				Rewrite: cd.Spec.Service.Rewrite,
				Timeout: cd.Spec.Service.Timeout,
				Retries: cd.Spec.Service.Retries,
				Route:   route,
			},
		},
	}

	virtualService, err := c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Get(targetName, metav1.GetOptions{})
	// insert
	if errors.IsNotFound(err) {
		c.logger.Debugf("VirtualService %s.%s not found", targetName, cd.Namespace)
		virtualService = &istiov1alpha3.VirtualService{
			ObjectMeta: metav1.ObjectMeta{
				Name: targetName,

@@ -183,42 +217,37 @@ func (c *CanaryRouter) syncVirtualService(cd *flaggerv1.Canary) error {
				}),
			},
		},
			Spec: newSpec,
		}

		c.logger.Debugf("Creating VirtualService %s.%s", virtualService.GetName(), cd.Namespace)
		_, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Create(virtualService)
		if err != nil {
			return fmt.Errorf("VirtualService %s.%s create error %v", targetName, cd.Namespace, err)
		}
		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
			Infof("VirtualService %s.%s created", virtualService.GetName(), cd.Namespace)
		return nil
	}

	if err != nil {
		return fmt.Errorf("VirtualService %s.%s query error %v", targetName, cd.Namespace, err)
	}

	// update service but keep the original destination weights
	if virtualService != nil {
		if diff := cmp.Diff(newSpec, virtualService.Spec, cmpopts.IgnoreTypes(istiov1alpha3.DestinationWeight{})); diff != "" {
			vtClone := virtualService.DeepCopy()
			vtClone.Spec = newSpec
			if len(virtualService.Spec.Http) > 0 {
				vtClone.Spec.Http[0].Route = virtualService.Spec.Http[0].Route
			}
			_, err = c.istioClient.NetworkingV1alpha3().VirtualServices(cd.Namespace).Update(vtClone)
			if err != nil {
				return fmt.Errorf("VirtualService %s.%s update error %v", targetName, cd.Namespace, err)
			}
			c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("VirtualService %s.%s updated", virtualService.GetName(), cd.Namespace)
		}
	}

	return nil
@@ -247,15 +276,15 @@ func (c *CanaryRouter) GetRoutes(cd *flaggerv1.Canary) (
			if route.Destination.Host == fmt.Sprintf("%s-primary", targetName) {
				primary = route
			}
			if route.Destination.Host == fmt.Sprintf("%s-canary", targetName) {
				canary = route
			}
		}
	}

	if primary.Weight == 0 && canary.Weight == 0 {
		err = fmt.Errorf("VirtualService %s.%s does not contain routes for %s-primary and %s-canary",
			targetName, cd.Namespace, targetName, targetName)
	}

	return
@@ -280,7 +309,11 @@ func (c *CanaryRouter) SetRoutes(
	vsCopy := vs.DeepCopy()
	vsCopy.Spec.Http = []istiov1alpha3.HTTPRoute{
		{
			Match:   cd.Spec.Service.Match,
			Rewrite: cd.Spec.Service.Rewrite,
			Timeout: cd.Spec.Service.Timeout,
			Retries: cd.Spec.Service.Retries,
			Route:   []istiov1alpha3.DestinationWeight{primary, canary},
		},
	}
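Since the canary route now targets the `<name>-canary` ClusterIP service, the weights Flagger manages can be inspected straight from the virtual service (object names from the podinfo examples in this diff):

```bash
kubectl -n test get virtualservice podinfo \
  -o jsonpath='{.spec.http[0].route[*].destination.host}{"\n"}{.spec.http[0].route[*].weight}{"\n"}'
```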
@@ -8,14 +8,14 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestCanaryRouter_SyncClusterIPServices(t *testing.T) {
	mocks := SetupMocks()
	err := mocks.router.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

	canarySvc, err := mocks.kubeClient.CoreV1().Services("default").Get("podinfo-canary", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

@@ -40,19 +40,6 @@ func TestCanaryRouter_Sync(t *testing.T) {
	if primarySvc.Spec.Ports[0].Port != 9898 {
		t.Errorf("Got primary svc port %v wanted %v", primarySvc.Spec.Ports[0].Port, 9898)
	}
}

func TestCanaryRouter_GetRoutes(t *testing.T) {

@@ -76,6 +63,87 @@ func TestCanaryRouter_GetRoutes(t *testing.T) {
	}
}

func TestCanaryRouter_SyncVirtualService(t *testing.T) {
	mocks := SetupMocks()
	err := mocks.router.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

	// test insert
	vs, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	if len(vs.Spec.Http) != 1 {
		t.Errorf("Got Istio VS Http %v wanted %v", len(vs.Spec.Http), 1)
	}

	if len(vs.Spec.Http[0].Route) != 2 {
		t.Errorf("Got Istio VS routes %v wanted %v", len(vs.Spec.Http[0].Route), 2)
	}

	// test update
	cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}

	cdClone := cd.DeepCopy()
	hosts := cdClone.Spec.Service.Hosts
	hosts = append(hosts, "test.example.com")
	cdClone.Spec.Service.Hosts = hosts
	canary, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Update(cdClone)
	if err != nil {
		t.Fatal(err.Error())
	}

	// apply change
	err = mocks.router.Sync(canary)
	if err != nil {
		t.Fatal(err.Error())
	}

	// verify
	vs, err = mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(vs.Spec.Hosts) != 2 {
		t.Errorf("Got Istio VS hosts %v wanted %v", vs.Spec.Hosts, 2)
	}

	// test drift
	vsClone := vs.DeepCopy()
	gateways := vsClone.Spec.Gateways
	gateways = append(gateways, "test-gateway.istio-system")
	vsClone.Spec.Gateways = gateways

	vsGateways, err := mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Update(vsClone)
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(vsGateways.Spec.Gateways) != 2 {
		t.Errorf("Got Istio VS gateway %v wanted %v", vsGateways.Spec.Gateways, 2)
	}

	// undo change
	err = mocks.router.Sync(mocks.canary)
	if err != nil {
		t.Fatal(err.Error())
	}

	// verify
	vs, err = mocks.istioClient.NetworkingV1alpha3().VirtualServices("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(vs.Spec.Gateways) != 1 {
		t.Errorf("Got Istio VS gateways %v wanted %v", vs.Spec.Gateways, 1)
	}
}

func TestCanaryRouter_SetRoutes(t *testing.T) {
	mocks := SetupMocks()
	err := mocks.router.Sync(mocks.canary)

@@ -109,7 +177,7 @@ func TestCanaryRouter_SetRoutes(t *testing.T) {
		if route.Destination.Host == fmt.Sprintf("%s-primary", mocks.canary.Spec.TargetRef.Name) {
			pRoute = route
		}
		if route.Destination.Host == fmt.Sprintf("%s-canary", mocks.canary.Spec.TargetRef.Name) {
			cRoute = route
		}
	}
@@ -5,6 +5,7 @@ import (
	"strings"
	"time"

	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	flaggerv1 "github.com/stefanprodan/flagger/pkg/apis/flagger/v1alpha3"
	"k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -170,6 +171,11 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
		}
	}

	// check if analysis should be skipped
	if skip := c.shouldSkipAnalysis(cd, primaryRoute, canaryRoute); skip {
		return
	}

	// check if the number of failed checks reached the threshold
	if cd.Status.Phase == flaggerv1.CanaryProgressing &&
		(!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {

@@ -294,6 +300,50 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
	}
}

func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, primary istiov1alpha3.DestinationWeight, canary istiov1alpha3.DestinationWeight) bool {
	if !cd.Spec.SkipAnalysis {
		return false
	}

	// route all traffic to primary
	primary.Weight = 100
	canary.Weight = 0
	if err := c.router.SetRoutes(cd, primary, canary); err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return false
	}
	c.recorder.SetWeight(cd, primary.Weight, canary.Weight)

	// copy spec and configs from canary to primary
	c.recordEventInfof(cd, "Copying %s.%s template spec to %s-primary.%s",
		cd.Spec.TargetRef.Name, cd.Namespace, cd.Spec.TargetRef.Name, cd.Namespace)
	if err := c.deployer.Promote(cd); err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return false
	}

	// shutdown canary
	if err := c.deployer.Scale(cd, 0); err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return false
	}

	// update status phase
	if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return false
	}

	// notify
	c.recorder.SetStatus(cd)
	c.recordEventInfof(cd, "Promotion completed! Canary analysis was skipped for %s.%s",
		cd.Spec.TargetRef.Name, cd.Namespace)
	c.sendNotification(cd, "Canary analysis was skipped, promotion finished.",
		false, false)

	return true
}
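The new `skipAnalysis` switch can be flipped on an existing canary without re-applying the whole object; a minimal sketch using a merge patch (object names match the tests above):

```bash
kubectl -n test patch canary/podinfo --type merge -p '{"spec":{"skipAnalysis":true}}'
```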
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
	c.recorder.SetStatus(cd)
	if cd.Status.Phase == flaggerv1.CanaryProgressing {

@@ -355,6 +405,10 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {

	// run metrics checks
	for _, metric := range r.Spec.CanaryAnalysis.Metrics {
		if metric.Interval == "" {
			metric.Interval = r.GetMetricInterval()
		}

		if metric.Name == "istio_requests_total" {
			val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
			if err != nil {

@@ -386,6 +440,24 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
				return false
			}
		}

		if metric.Query != "" {
			val, err := c.observer.GetScalar(metric.Query)
			if err != nil {
				if strings.Contains(err.Error(), "no values found") {
					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
				} else {
					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
				}
				return false
			}
			if val > float64(metric.Threshold) {
				c.recordEventWarningf(r, "Halt %s.%s advancement %s %.2f > %v",
					r.Name, r.Namespace, metric.Name, val, metric.Threshold)
				return false
			}
		}
	}

	return true
@@ -64,6 +64,47 @@ func TestScheduler_Rollback(t *testing.T) {
	}
}

func TestScheduler_SkipAnalysis(t *testing.T) {
	mocks := SetupMocks()
	// init
	mocks.ctrl.advanceCanary("podinfo", "default", false)

	// enable skip
	cd, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
	cd.Spec.SkipAnalysis = true
	_, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Update(cd)
	if err != nil {
		t.Fatal(err.Error())
	}

	// update
	dep2 := newTestDeploymentV2()
	_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
	if err != nil {
		t.Fatal(err.Error())
	}

	// detect changes
	mocks.ctrl.advanceCanary("podinfo", "default", true)
	// advance
	mocks.ctrl.advanceCanary("podinfo", "default", true)

	c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
	if !c.Spec.SkipAnalysis {
		t.Errorf("Got skip analysis %v wanted %v", c.Spec.SkipAnalysis, true)
	}

	if c.Status.Phase != v1alpha3.CanarySucceeded {
		t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
	}
}

func TestScheduler_NewRevisionReset(t *testing.T) {
	mocks := SetupMocks()
	// init
@@ -1,4 +1,4 @@
package version

var VERSION = "0.7.0"
var REVISION = "unknown"
4 test/Dockerfile.kind (new file)

FROM golang:1.11

RUN go get -u sigs.k8s.io/kind
24 test/README.md (new file)

# Flagger end-to-end testing

The e2e testing infrastructure is powered by CircleCI and [Kubernetes Kind](https://github.com/kubernetes-sigs/kind).

CircleCI e2e workflow (you can run the same scripts locally, see the commands after this list):

* install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
* build Kubernetes Kind from master [e2e-kind.sh](e2e-kind.sh)
* create a local Kubernetes cluster with kind [e2e-kind.sh](e2e-kind.sh)
* install latest stable Helm CLI [e2e-istio.sh](e2e-istio.sh)
* deploy Tiller on the local cluster [e2e-istio.sh](e2e-istio.sh)
* install Istio CRDs with Helm [e2e-istio.sh](e2e-istio.sh)
* install Istio control plane and Prometheus with Helm [e2e-istio.sh](e2e-istio.sh)
* build the Flagger container image [e2e-build.sh](e2e-build.sh)
* load the Flagger image onto the local cluster [e2e-build.sh](e2e-build.sh)
* deploy Flagger in the istio-system namespace [e2e-build.sh](e2e-build.sh)
* create a test namespace with Istio injection enabled [e2e-tests.sh](e2e-tests.sh)
* deploy the load tester in the test namespace [e2e-tests.sh](e2e-tests.sh)
* deploy a demo workload (podinfo) in the test namespace [e2e-tests.sh](e2e-tests.sh)
* test the canary initialization [e2e-tests.sh](e2e-tests.sh)
* test the canary analysis and promotion [e2e-tests.sh](e2e-tests.sh)
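The workflow maps one-to-one onto the scripts, so the suite can be reproduced locally against Docker by running them in order (a sketch; run from the repo root):

```bash
test/e2e-kind.sh
test/e2e-istio.sh
test/e2e-build.sh
test/e2e-tests.sh
```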
15 test/e2e-build.sh (new executable file)

#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Building Flagger'
cd ${REPO_ROOT} && docker build -t test/flagger:latest . -f Dockerfile

echo '>>> Installing Flagger'
kind load docker-image test/flagger:latest
kubectl apply -f ${REPO_ROOT}/artifacts/flagger/
kubectl -n istio-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n istio-system rollout status deployment/flagger
62 test/e2e-istio-values.yaml (new file)

#
# Minimal Istio configuration required by Flagger
#

# pilot configuration
pilot:
  enabled: true
  sidecar: true

gateways:
  enabled: false
  istio-ingressgateway:
    autoscaleMax: 1

# citadel configuration
security:
  enabled: true

# sidecar-injector webhook configuration
sidecarInjectorWebhook:
  enabled: true

# galley configuration
galley:
  enabled: false

# mixer configuration
mixer:
  policy:
    enabled: false
    replicaCount: 1
    autoscaleEnabled: false
  telemetry:
    enabled: true
    replicaCount: 1
    autoscaleEnabled: false
    resources:
      requests:
        cpu: 100m
        memory: 128Mi

# addon prometheus configuration
prometheus:
  enabled: true
  scrapeInterval: 5s

# addon jaeger tracing configuration
tracing:
  enabled: false

# common settings
global:
  proxy:
    # resources for the sidecar
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 2000m
        memory: 128Mi
  useMCP: false
28 test/e2e-istio.sh (new executable file)

#!/usr/bin/env bash

set -o errexit

ISTIO_VER="1.1.0-rc.0"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo ">>> Installing Helm"
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash

echo '>>> Installing Tiller'
kubectl --namespace kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller --upgrade --wait

echo ">>> Installing Istio ${ISTIO_VER}"
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/${ISTIO_VER}/charts

echo '>>> Installing Istio CRDs'
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system

echo '>>> Waiting for Istio CRDs to be ready'
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-10
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11

echo '>>> Installing Istio control plane'
helm upgrade -i istio istio.io/istio --wait --namespace istio-system -f ${REPO_ROOT}/test/e2e-istio-values.yaml
25 test/e2e-kind.sh (new executable file)

#!/usr/bin/env bash

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

echo ">>> Installing kubectl"
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x kubectl && \
sudo mv kubectl /usr/local/bin/

echo ">>> Building sigs.k8s.io/kind"
docker build -t kind:src . -f ${REPO_ROOT}/test/Dockerfile.kind
docker create -ti --name dummy kind:src sh
docker cp dummy:/go/bin/kind ./kind
docker rm -f dummy

echo ">>> Installing kind"
chmod +x kind
sudo mv kind /usr/local/bin/
kind create cluster --wait 5m

export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
kubectl get pods --all-namespaces
123 test/e2e-tests.sh (new executable file)

#!/usr/bin/env bash

# This script runs e2e tests for Canary initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Helm and Istio

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"

echo '>>> Creating test namespace'
kubectl create namespace test
kubectl label namespace test istio-injection=enabled

echo '>>> Installing the load tester'
kubectl -n test apply -f ${REPO_ROOT}/artifacts/loadtester/
kubectl -n test rollout status deployment/flagger-loadtester

echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml

cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
  canaryAnalysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeight: 10
    metrics:
    - name: istio_requests_total
      threshold: 99
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      threshold: 500
      interval: 30s
    - name: "404s percentage"
      threshold: 5
      interval: 1m
      query: |
        100 - sum(
            rate(
                istio_requests_total{
                    reporter="destination",
                    destination_workload_namespace=~"test",
                    destination_workload=~"podinfo",
                    response_code!="404"
                }[1m]
            )
        )
        /
        sum(
            rate(
                istio_requests_total{
                    reporter="destination",
                    destination_workload_namespace=~"test",
                    destination_workload=~"podinfo"
                }[1m]
            )
        ) * 100
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/"
EOF

echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary initialization test passed'

echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.1

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
    sleep 10
    kubectl -n istio-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary promotion test passed'

kubectl -n istio-system logs deployment/flagger

echo '✔ All tests passed'
67 test/e2e-workload.yaml (new file)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.4.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: blue
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
          limits:
            cpu: 1000m
            memory: 128Mi
          requests:
            cpu: 1m
            memory: 16Mi
18
vendor/github.com/knative/pkg/apis/doc.go
generated
vendored
@@ -1,18 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
package apis
23
vendor/github.com/knative/pkg/apis/duck/doc.go
generated
vendored
@@ -1,23 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package duck defines logic for defining and consuming "duck typed"
// Kubernetes resources. Producers define partial resource definitions
// that resource authors may choose to implement to interoperate with
// consumers of these "duck typed" interfaces.
// For more information see:
// TODO(mattmoor): Add link to doc.
package duck
21
vendor/github.com/knative/pkg/apis/duck/register.go
generated
vendored
@@ -1,21 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package duck

const (
    GroupName = "duck.knative.dev"
)
280
vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
generated
vendored
@@ -1,280 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "reflect"
    "sort"
    "time"

    "fmt"

    "github.com/knative/pkg/apis"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ConditionsAccessor is the interface for a Resource that implements the getter and
// setter for accessing a Condition collection.
// +k8s:deepcopy-gen=true
type ConditionsAccessor interface {
    GetConditions() Conditions
    SetConditions(Conditions)
}

// ConditionSet is an abstract collection of the possible ConditionType values
// that a particular resource might expose. It also holds the "happy condition"
// for that resource, which we define to be one of Ready or Succeeded depending
// on whether it is a Living or Batch process respectively.
// +k8s:deepcopy-gen=false
type ConditionSet struct {
    happy      ConditionType
    dependents []ConditionType
}

// ConditionManager allows a resource to operate on its Conditions using higher
// order operations.
type ConditionManager interface {
    // IsHappy looks at the happy condition and returns true if that condition is
    // set to true.
    IsHappy() bool

    // GetCondition finds and returns the Condition that matches the ConditionType
    // previously set on Conditions.
    GetCondition(t ConditionType) *Condition

    // SetCondition sets or updates the Condition on Conditions for Condition.Type.
    // If there is an update, Conditions are stored back sorted.
    SetCondition(new Condition)

    // MarkTrue sets the status of t to true, and then marks the happy condition to
    // true if all other dependents are also true.
    MarkTrue(t ConditionType)

    // MarkUnknown sets the status of t to Unknown and also sets the happy condition
    // to Unknown if no other dependent condition is in an error state.
    MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{})

    // MarkFalse sets the status of t and the happy condition to False.
    MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{})

    // InitializeConditions updates all Conditions in the ConditionSet to Unknown
    // if not set.
    InitializeConditions()

    // InitializeCondition updates a Condition to Unknown if not set.
    InitializeCondition(t ConditionType)
}

// NewLivingConditionSet returns a ConditionSet to hold the conditions for the
// living resource. ConditionReady is used as the happy condition.
func NewLivingConditionSet(d ...ConditionType) ConditionSet {
    return newConditionSet(ConditionReady, d...)
}

// NewBatchConditionSet returns a ConditionSet to hold the conditions for the
// batch resource. ConditionSucceeded is used as the happy condition.
func NewBatchConditionSet(d ...ConditionType) ConditionSet {
    return newConditionSet(ConditionSucceeded, d...)
}

// newConditionSet returns a ConditionSet to hold the conditions that are
// important for the caller. The first ConditionType is the overarching status
// that will be used to signal that the resource's status is Ready or Succeeded.
func newConditionSet(happy ConditionType, dependents ...ConditionType) ConditionSet {
    var deps []ConditionType
    for _, d := range dependents {
        // Skip duplicates
        if d == happy || contains(deps, d) {
            continue
        }
        deps = append(deps, d)
    }
    return ConditionSet{
        happy:      happy,
        dependents: deps,
    }
}

func contains(ct []ConditionType, t ConditionType) bool {
    for _, c := range ct {
        if c == t {
            return true
        }
    }
    return false
}

// Check that conditionsImpl implements ConditionManager.
var _ ConditionManager = (*conditionsImpl)(nil)

// conditionsImpl implements the helper methods for evaluating Conditions.
// +k8s:deepcopy-gen=false
type conditionsImpl struct {
    ConditionSet
    accessor ConditionsAccessor
}

// Manage creates a ConditionManager from an object that implements
// ConditionsAccessor using the original ConditionSet as a reference.
func (r ConditionSet) Manage(accessor ConditionsAccessor) ConditionManager {
    return conditionsImpl{
        accessor:     accessor,
        ConditionSet: r,
    }
}

// IsHappy looks at the happy condition and returns true if that condition is
// set to true.
func (r conditionsImpl) IsHappy() bool {
    if c := r.GetCondition(r.happy); c == nil || !c.IsTrue() {
        return false
    }
    return true
}

// GetCondition finds and returns the Condition that matches the ConditionType
// previously set on Conditions.
func (r conditionsImpl) GetCondition(t ConditionType) *Condition {
    if r.accessor == nil {
        return nil
    }
    for _, c := range r.accessor.GetConditions() {
        if c.Type == t {
            return &c
        }
    }
    return nil
}

// SetCondition sets or updates the Condition on Conditions for Condition.Type.
// If there is an update, Conditions are stored back sorted.
func (r conditionsImpl) SetCondition(new Condition) {
    if r.accessor == nil {
        return
    }
    t := new.Type
    var conditions Conditions
    for _, c := range r.accessor.GetConditions() {
        if c.Type != t {
            conditions = append(conditions, c)
        } else {
            // If we'd only update the LastTransitionTime, then return.
            new.LastTransitionTime = c.LastTransitionTime
            if reflect.DeepEqual(&new, &c) {
                return
            }
        }
    }
    new.LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(time.Now())}
    conditions = append(conditions, new)
    // Sorted for convenience of the consumer, i.e.: kubectl.
    sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type })
    r.accessor.SetConditions(conditions)
}

// MarkTrue sets the status of t to true, and then marks the happy condition to
// true if all other dependents are also true.
func (r conditionsImpl) MarkTrue(t ConditionType) {
    // set the specified condition
    r.SetCondition(Condition{
        Type:   t,
        Status: corev1.ConditionTrue,
    })

    // check the dependents.
    for _, cond := range r.dependents {
        c := r.GetCondition(cond)
        // Failed or Unknown conditions trump true conditions
        if !c.IsTrue() {
            return
        }
    }

    // set the happy condition
    r.SetCondition(Condition{
        Type:   r.happy,
        Status: corev1.ConditionTrue,
    })
}

// MarkUnknown sets the status of t to Unknown and also sets the happy condition
// to Unknown if no other dependent condition is in an error state.
func (r conditionsImpl) MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) {
    // set the specified condition
    r.SetCondition(Condition{
        Type:    t,
        Status:  corev1.ConditionUnknown,
        Reason:  reason,
        Message: fmt.Sprintf(messageFormat, messageA...),
    })

    // check the dependents.
    for _, cond := range r.dependents {
        c := r.GetCondition(cond)
        // Failed conditions trump Unknown conditions
        if c.IsFalse() {
            // Double check that the happy condition is also false.
            happy := r.GetCondition(r.happy)
            if !happy.IsFalse() {
                r.MarkFalse(r.happy, reason, messageFormat, messageA...)
            }
            return
        }
    }

    // set the happy condition
    r.SetCondition(Condition{
        Type:    r.happy,
        Status:  corev1.ConditionUnknown,
        Reason:  reason,
        Message: fmt.Sprintf(messageFormat, messageA...),
    })
}

// MarkFalse sets the status of t and the happy condition to False.
func (r conditionsImpl) MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) {
    for _, t := range []ConditionType{
        t,
        r.happy,
    } {
        r.SetCondition(Condition{
            Type:    t,
            Status:  corev1.ConditionFalse,
            Reason:  reason,
            Message: fmt.Sprintf(messageFormat, messageA...),
        })
    }
}

// InitializeConditions updates all Conditions in the ConditionSet to Unknown
// if not set.
func (r conditionsImpl) InitializeConditions() {
    for _, t := range append(r.dependents, r.happy) {
        r.InitializeCondition(t)
    }
}

// InitializeCondition updates a Condition to Unknown if not set.
func (r conditionsImpl) InitializeCondition(t ConditionType) {
    if c := r.GetCondition(t); c == nil {
        r.SetCondition(Condition{
            Type:   t,
            Status: corev1.ConditionUnknown,
        })
    }
}
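For orientation, the ConditionManager above is driven roughly like this; a minimal sketch assuming a hypothetical MyStatus type that satisfies ConditionsAccessor (the type and condition names are invented for illustration):

package example

import (
    duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
)

// MyStatus is a hypothetical status struct satisfying ConditionsAccessor.
type MyStatus struct {
    Conditions duckv1alpha1.Conditions `json:"conditions,omitempty"`
}

func (s *MyStatus) GetConditions() duckv1alpha1.Conditions  { return s.Conditions }
func (s *MyStatus) SetConditions(c duckv1alpha1.Conditions) { s.Conditions = c }

// condSet declares "Deployed" and "Routed" as dependents of Ready.
var condSet = duckv1alpha1.NewLivingConditionSet("Deployed", "Routed")

func markDeployed(s *MyStatus) {
    m := condSet.Manage(s)
    m.InitializeConditions() // all conditions start as Unknown
    m.MarkTrue("Deployed")   // Ready stays Unknown until "Routed" is also True
}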
149
vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go
generated
vendored
@@ -1,149 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/knative/pkg/apis"
    "github.com/knative/pkg/apis/duck"
)

// Conditions is the schema for the conditions portion of the payload
type Conditions []Condition

// ConditionType is a camel-cased condition type.
type ConditionType string

const (
    // ConditionReady specifies that the resource is ready.
    // For long-running resources.
    ConditionReady ConditionType = "Ready"
    // ConditionSucceeded specifies that the resource has finished.
    // For resources which run to completion.
    ConditionSucceeded ConditionType = "Succeeded"
)

// Condition defines a readiness condition for a Knative resource.
// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties
// +k8s:deepcopy-gen=true
type Condition struct {
    // Type of condition.
    // +required
    Type ConditionType `json:"type" description:"type of status condition"`

    // Status of the condition, one of True, False, Unknown.
    // +required
    Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`

    // LastTransitionTime is the last time the condition transitioned from one status to another.
    // We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic
    // differences (all other things held constant).
    // +optional
    LastTransitionTime apis.VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"`

    // The reason for the condition's last transition.
    // +optional
    Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`

    // A human readable message indicating details about the transition.
    // +optional
    Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
}

// IsTrue is true if the condition is True
func (c *Condition) IsTrue() bool {
    if c == nil {
        return false
    }
    return c.Status == corev1.ConditionTrue
}

// IsFalse is true if the condition is False
func (c *Condition) IsFalse() bool {
    if c == nil {
        return false
    }
    return c.Status == corev1.ConditionFalse
}

// IsUnknown is true if the condition is Unknown
func (c *Condition) IsUnknown() bool {
    if c == nil {
        return true
    }
    return c.Status == corev1.ConditionUnknown
}

// Implementations can verify that they implement Conditions via:
var _ = duck.VerifyType(&KResource{}, &Conditions{})

// Conditions is an Implementable "duck type".
var _ duck.Implementable = (*Conditions)(nil)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KResource is a skeleton type wrapping Conditions in the manner we expect
// resource writers defining compatible resources to embed it. We will
// typically use this type to deserialize Conditions ObjectReferences and
// access the Conditions data. This is not a real resource.
type KResource struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Status KResourceStatus `json:"status"`
}

// KResourceStatus shows how we expect folks to embed Conditions in
// their Status field.
type KResourceStatus struct {
    Conditions Conditions `json:"conditions,omitempty"`
}

// In order for Conditions to be Implementable, KResource must be Populatable.
var _ duck.Populatable = (*KResource)(nil)

// GetFullType implements duck.Implementable
func (_ *Conditions) GetFullType() duck.Populatable {
    return &KResource{}
}

// Populate implements duck.Populatable
func (t *KResource) Populate() {
    t.Status.Conditions = Conditions{{
        // Populate ALL fields
        Type:               "Birthday",
        Status:             corev1.ConditionTrue,
        LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))},
        Reason:             "Celebrate",
        Message:            "n3wScott, find your party hat :tada:",
    }}
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KResourceList is a list of KResource resources
type KResourceList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata"`

    Items []KResource `json:"items"`
}
23
vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go
generated
vendored
@@ -1,23 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// API versions allow the API contract for a resource to be changed while keeping
// backward compatibility by supporting multiple concurrent versions
// of the same resource

// +k8s:deepcopy-gen=package
// +groupName=duck.knative.dev
package v1alpha1
76
vendor/github.com/knative/pkg/apis/duck/v1alpha1/generational_types.go
generated
vendored
@@ -1,76 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/knative/pkg/apis/duck"
)

// Generation is the schema for the generational portion of the payload
type Generation int64

// Implementations can verify that they implement Generation via:
var emptyGen Generation
var _ = duck.VerifyType(&Generational{}, &emptyGen)

// Generation is an Implementable "duck type".
var _ duck.Implementable = (*Generation)(nil)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Generational is a skeleton type wrapping Generation in the manner we expect
// resource writers defining compatible resources to embed it. We will
// typically use this type to deserialize Generation ObjectReferences and
// access the Generation data. This is not a real resource.
type Generational struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec GenerationalSpec `json:"spec"`
}

// GenerationalSpec shows how we expect folks to embed Generation in
// their Spec field.
type GenerationalSpec struct {
    Generation Generation `json:"generation,omitempty"`
}

// In order for Generation to be Implementable, Generational must be Populatable.
var _ duck.Populatable = (*Generational)(nil)

// GetFullType implements duck.Implementable
func (_ *Generation) GetFullType() duck.Populatable {
    return &Generational{}
}

// Populate implements duck.Populatable
func (t *Generational) Populate() {
    t.Spec.Generation = 1234
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// GenerationalList is a list of Generational resources
type GenerationalList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata"`

    Items []Generational `json:"items"`
}
53
vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go
generated
vendored
@@ -1,53 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "github.com/knative/pkg/apis/duck"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: duck.GroupName, Version: "v1alpha1"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
    return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
    SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
    AddToScheme   = SchemeBuilder.AddToScheme
)

// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
    scheme.AddKnownTypes(
        SchemeGroupVersion,
        // &VirtualService{},
        // &VirtualServiceList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil
}
81
vendor/github.com/knative/pkg/apis/duck/v1alpha1/subscribable_types.go
generated
vendored
@@ -1,81 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/knative/pkg/apis/duck"
)

// Subscribable is the schema for the subscribable portion of the payload
type Subscribable struct {
    // TODO(vaikas): Give me a schema!
    Field string `json:"field,omitempty"`
}

// Implementations can verify that they implement Subscribable via:
var _ = duck.VerifyType(&Topic{}, &Subscribable{})

// Subscribable is an Implementable "duck type".
var _ duck.Implementable = (*Subscribable)(nil)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Topic is a skeleton type wrapping Subscribable in the manner we expect
// resource writers defining compatible resources to embed it. We will
// typically use this type to deserialize Subscribable ObjectReferences and
// access the Subscribable data. This is not a real resource.
type Topic struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Status TopicStatus `json:"status"`
}

// TopicStatus shows how we expect folks to embed Subscribable in
// their Status field.
type TopicStatus struct {
    Subscribable *Subscribable `json:"subscribable,omitempty"`
}

// In order for Subscribable to be Implementable, Topic must be Populatable.
var _ duck.Populatable = (*Topic)(nil)

// GetFullType implements duck.Implementable
func (_ *Subscribable) GetFullType() duck.Populatable {
    return &Topic{}
}

// Populate implements duck.Populatable
func (t *Topic) Populate() {
    t.Status.Subscribable = &Subscribable{
        // Populate ALL fields
        Field: "this is not empty",
    }
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// TopicList is a list of Topic resources
type TopicList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata"`

    Items []Topic `json:"items"`
}
81
vendor/github.com/knative/pkg/apis/duck/v1alpha1/targettable_types.go
generated
vendored
@@ -1,81 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/knative/pkg/apis/duck"
)

// Targetable is the schema for the targetable portion of the payload
type Targetable struct {
    // TODO(vaikas): Give me a schema!
    Field string `json:"field,omitempty"`
}

// Implementations can verify that they implement Targetable via:
var _ = duck.VerifyType(&Target{}, &Targetable{})

// Targetable is an Implementable "duck type".
var _ duck.Implementable = (*Targetable)(nil)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Target is a skeleton type wrapping Targetable in the manner we expect
// resource writers defining compatible resources to embed it. We will
// typically use this type to deserialize Targetable ObjectReferences and
// access the Targetable data. This is not a real resource.
type Target struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Status TargetStatus `json:"status"`
}

// TargetStatus shows how we expect folks to embed Targetable in
// their Status field.
type TargetStatus struct {
    Targetable *Targetable `json:"targetable,omitempty"`
}

// In order for Targetable to be Implementable, Target must be Populatable.
var _ duck.Populatable = (*Target)(nil)

// GetFullType implements duck.Implementable
func (_ *Targetable) GetFullType() duck.Populatable {
    return &Target{}
}

// Populate implements duck.Populatable
func (t *Target) Populate() {
    t.Status.Targetable = &Targetable{
        // Populate ALL fields
        Field: "this is not empty",
    }
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// TargetList is a list of Target resources
type TargetList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata"`

    Items []Target `json:"items"`
}
417
vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
generated
vendored
@@ -1,417 +0,0 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
    *out = *in
    in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
    if in == nil {
        return nil
    }
    out := new(Condition)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Conditions) DeepCopyInto(out *Conditions) {
    {
        in := &in
        *out = make(Conditions, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
        return
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
func (in Conditions) DeepCopy() Conditions {
    if in == nil {
        return nil
    }
    out := new(Conditions)
    in.DeepCopyInto(out)
    return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Generational) DeepCopyInto(out *Generational) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    out.Spec = in.Spec
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generational.
func (in *Generational) DeepCopy() *Generational {
    if in == nil {
        return nil
    }
    out := new(Generational)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Generational) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerationalList) DeepCopyInto(out *GenerationalList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Generational, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationalList.
func (in *GenerationalList) DeepCopy() *GenerationalList {
    if in == nil {
        return nil
    }
    out := new(GenerationalList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GenerationalList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerationalSpec) DeepCopyInto(out *GenerationalSpec) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationalSpec.
func (in *GenerationalSpec) DeepCopy() *GenerationalSpec {
    if in == nil {
        return nil
    }
    out := new(GenerationalSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KResource) DeepCopyInto(out *KResource) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Status.DeepCopyInto(&out.Status)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResource.
func (in *KResource) DeepCopy() *KResource {
    if in == nil {
        return nil
    }
    out := new(KResource)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KResource) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KResourceList) DeepCopyInto(out *KResourceList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]KResource, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceList.
func (in *KResourceList) DeepCopy() *KResourceList {
    if in == nil {
        return nil
    }
    out := new(KResourceList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KResourceList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KResourceStatus) DeepCopyInto(out *KResourceStatus) {
    *out = *in
    if in.Conditions != nil {
        in, out := &in.Conditions, &out.Conditions
        *out = make(Conditions, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceStatus.
func (in *KResourceStatus) DeepCopy() *KResourceStatus {
    if in == nil {
        return nil
    }
    out := new(KResourceStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subscribable) DeepCopyInto(out *Subscribable) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscribable.
func (in *Subscribable) DeepCopy() *Subscribable {
    if in == nil {
        return nil
    }
    out := new(Subscribable)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Target) DeepCopyInto(out *Target) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Status.DeepCopyInto(&out.Status)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target.
func (in *Target) DeepCopy() *Target {
    if in == nil {
        return nil
    }
    out := new(Target)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Target) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetList) DeepCopyInto(out *TargetList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Target, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetList.
func (in *TargetList) DeepCopy() *TargetList {
    if in == nil {
        return nil
    }
    out := new(TargetList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TargetList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetStatus) DeepCopyInto(out *TargetStatus) {
    *out = *in
    if in.Targetable != nil {
        in, out := &in.Targetable, &out.Targetable
        *out = new(Targetable)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus.
func (in *TargetStatus) DeepCopy() *TargetStatus {
    if in == nil {
        return nil
    }
    out := new(TargetStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Targetable) DeepCopyInto(out *Targetable) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targetable.
func (in *Targetable) DeepCopy() *Targetable {
    if in == nil {
        return nil
    }
    out := new(Targetable)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Topic) DeepCopyInto(out *Topic) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Status.DeepCopyInto(&out.Status)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic.
func (in *Topic) DeepCopy() *Topic {
    if in == nil {
        return nil
    }
    out := new(Topic)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Topic) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicList) DeepCopyInto(out *TopicList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Topic, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList.
func (in *TopicList) DeepCopy() *TopicList {
    if in == nil {
        return nil
    }
    out := new(TopicList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TopicList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicStatus) DeepCopyInto(out *TopicStatus) {
    *out = *in
    if in.Subscribable != nil {
        in, out := &in.Subscribable, &out.Subscribable
        *out = new(Subscribable)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus.
func (in *TopicStatus) DeepCopy() *TopicStatus {
    if in == nil {
        return nil
    }
    out := new(TopicStatus)
    in.DeepCopyInto(out)
    return out
}
86
vendor/github.com/knative/pkg/apis/duck/verify.go
generated
vendored
@@ -1,86 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package duck

import (
    "encoding/json"
    "fmt"

    "github.com/google/go-cmp/cmp"
)

// Implementable is implemented by the Fooable duck type that consumers
// are expected to embed as a `.status.fooable` field.
type Implementable interface {
    // GetFullType returns an instance of a full resource wrapping
    // an instance of this Implementable that can populate its fields
    // to verify json roundtripping.
    GetFullType() Populatable
}

// Populatable is implemented by a skeleton resource wrapping an Implementable
// duck type. It will generally have TypeMeta, ObjectMeta, and a Status field
// wrapping a Fooable field.
type Populatable interface {
    // Populate fills in all possible fields, so that we can verify that
    // they roundtrip properly through JSON.
    Populate()
}

// VerifyType verifies that a particular concrete resource properly implements
// the provided Implementable duck type. It is expected that under the resource
// definition implementing a particular "Fooable" that one would write:
//
//   type ConcreteResource struct { ... }
//
//   // Check that ConcreteResource properly implements Fooable.
//   var _ = duck.VerifyType(&ConcreteResource{}, &something.Fooable{})
//
// This will panic on startup if the duck typing is not satisfied. The return
// value is purely cosmetic to enable the `var _ = ...` shorthand.
func VerifyType(instance interface{}, iface Implementable) (nothing interface{}) {
    // Create instances of the full resource for our input and ultimate result
    // that we will compare at the end.
    input, output := iface.GetFullType(), iface.GetFullType()

    // Populate our input resource with values we will roundtrip.
    input.Populate()

    // Serialize the input to JSON and deserialize that into the provided instance
    // of the type that we are checking.
    if before, err := json.Marshal(input); err != nil {
        panic(fmt.Sprintf("Error serializing duck type %T", input))
    } else if err := json.Unmarshal(before, instance); err != nil {
        panic(fmt.Sprintf("Error deserializing duck type %T into %T", input, instance))
    }

    // Serialize the instance we are checking to JSON and deserialize that into the
    // output resource.
    if after, err := json.Marshal(instance); err != nil {
        panic(fmt.Sprintf("Error serializing %T", instance))
    } else if err := json.Unmarshal(after, output); err != nil {
        panic(fmt.Sprintf("Error deserializing %T into duck type %T", instance, output))
    }

    // Now verify that we were able to roundtrip all of our fields through the type
    // we are checking.
    if diff := cmp.Diff(input, output); diff != "" {
        panic(fmt.Sprintf("%T does not implement the duck type %T, the following fields were lost: %s",
            instance, iface, diff))
    }
    return
}
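As the comment on VerifyType describes, resource authors assert duck-type conformance with a package-level check. A hedged sketch of that usage against the Conditions duck type (the MyResource type is invented for illustration):

package mytype

import (
    "github.com/knative/pkg/apis/duck"
    duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// MyResource is a hypothetical CRD embedding Conditions in its status.
type MyResource struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Status MyResourceStatus `json:"status"`
}

// MyResourceStatus embeds Conditions the way KResourceStatus does.
type MyResourceStatus struct {
    Conditions duckv1alpha1.Conditions `json:"conditions,omitempty"`
}

// Panics at package init if MyResource loses Conditions fields during
// a JSON round trip, i.e. if the duck typing is not satisfied.
var _ = duck.VerifyType(&MyResource{}, &duckv1alpha1.Conditions{})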
186
vendor/github.com/knative/pkg/apis/field_error.go
generated
vendored
@@ -1,186 +0,0 @@
/*
Copyright 2017 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

import (
    "fmt"
    "strings"
)

// CurrentField is a constant to supply as a fieldPath for when there is
// a problem with the current field itself.
const CurrentField = ""

// FieldError is used to propagate the context of errors pertaining to
// specific fields in a manner suitable for use in a recursive walk, so
// that errors contain the appropriate field context.
// +k8s:deepcopy-gen=false
type FieldError struct {
    Message string
    Paths   []string
    // Details contains an optional longer payload.
    Details string
}

// FieldError implements error
var _ error = (*FieldError)(nil)

// ViaField is used to propagate a validation error along a field access.
// For example, if a type recursively validates its "spec" via:
//   if err := foo.Spec.Validate(); err != nil {
//     // Augment any field paths with the context that they were accessed
//     // via "spec".
//     return err.ViaField("spec")
//   }
func (fe *FieldError) ViaField(prefix ...string) *FieldError {
    if fe == nil {
        return nil
    }
    var newPaths []string
    for _, oldPath := range fe.Paths {
        newPaths = append(newPaths, flatten(append(prefix, oldPath)))
    }
    fe.Paths = newPaths
    return fe
}

// ViaIndex is used to attach an index to the next ViaField provided.
// For example, if a type recursively validates a parameter that has a collection:
//   for i, c := range spec.Collection {
//     if err := doValidation(c); err != nil {
//       return err.ViaIndex(i).ViaField("collection")
//     }
//   }
func (fe *FieldError) ViaIndex(index int) *FieldError {
    if fe == nil {
        return nil
    }
    return fe.ViaField(fmt.Sprintf("[%d]", index))
}

// ViaFieldIndex is the short way to chain: err.ViaIndex(bar).ViaField(foo)
func (fe *FieldError) ViaFieldIndex(field string, index int) *FieldError {
    return fe.ViaIndex(index).ViaField(field)
}

// ViaKey is used to attach a key to the next ViaField provided.
// For example, if a type recursively validates a parameter that has a collection:
//   for k, v := range spec.Bag {
//     if err := doValidation(v); err != nil {
//       return err.ViaKey(k).ViaField("bag")
//     }
//   }
func (fe *FieldError) ViaKey(key string) *FieldError {
    if fe == nil {
        return nil
    }
    return fe.ViaField(fmt.Sprintf("[%s]", key))
}

// ViaFieldKey is the short way to chain: err.ViaKey(bar).ViaField(foo)
func (fe *FieldError) ViaFieldKey(field string, key string) *FieldError {
    return fe.ViaKey(key).ViaField(field)
}

// flatten takes in an array of path components and looks for chances to flatten
// objects that have index prefixes, examples:
//   err([0]).ViaField(bar).ViaField(foo) -> foo.bar.[0] converts to foo.bar[0]
//   err(bar).ViaIndex(0).ViaField(foo) -> foo.[0].bar converts to foo[0].bar
//   err(bar).ViaField(foo).ViaIndex(0) -> [0].foo.bar converts to [0].foo.bar
//   err(bar).ViaIndex(0).ViaIndex[1].ViaField(foo) -> foo.[1].[0].bar converts to foo[1][0].bar
func flatten(path []string) string {
    var newPath []string
    for _, part := range path {
        for _, p := range strings.Split(part, ".") {
            if p == CurrentField {
                continue
            } else if len(newPath) > 0 && isIndex(p) {
                newPath[len(newPath)-1] = fmt.Sprintf("%s%s", newPath[len(newPath)-1], p)
            } else {
                newPath = append(newPath, p)
            }
        }
    }
    return strings.Join(newPath, ".")
}

func isIndex(part string) bool {
    return strings.HasPrefix(part, "[") && strings.HasSuffix(part, "]")
}

// Error implements error
func (fe *FieldError) Error() string {
    if fe.Details == "" {
        return fmt.Sprintf("%v: %v", fe.Message, strings.Join(fe.Paths, ", "))
    }
    return fmt.Sprintf("%v: %v\n%v", fe.Message, strings.Join(fe.Paths, ", "), fe.Details)
}

// ErrMissingField is a variadic helper method for constructing a FieldError for
// a set of missing fields.
func ErrMissingField(fieldPaths ...string) *FieldError {
    return &FieldError{
        Message: "missing field(s)",
        Paths:   fieldPaths,
    }
}

// ErrDisallowedFields is a variadic helper method for constructing a FieldError
// for a set of disallowed fields.
func ErrDisallowedFields(fieldPaths ...string) *FieldError {
    return &FieldError{
        Message: "must not set the field(s)",
        Paths:   fieldPaths,
    }
}

// ErrInvalidValue constructs a FieldError for a field that has received an
// invalid string value.
func ErrInvalidValue(value, fieldPath string) *FieldError {
    return &FieldError{
        Message: fmt.Sprintf("invalid value %q", value),
        Paths:   []string{fieldPath},
    }
}

// ErrMissingOneOf is a variadic helper method for constructing a FieldError for
// not having at least one field in a mutually exclusive field group.
func ErrMissingOneOf(fieldPaths ...string) *FieldError {
    return &FieldError{
        Message: "expected exactly one, got neither",
        Paths:   fieldPaths,
    }
}

// ErrMultipleOneOf is a variadic helper method for constructing a FieldError
// for having more than one field set in a mutually exclusive field group.
func ErrMultipleOneOf(fieldPaths ...string) *FieldError {
    return &FieldError{
        Message: "expected exactly one, got both",
        Paths:   fieldPaths,
    }
}

// ErrInvalidKeyName is a variadic helper method for constructing a
// FieldError that specifies a key name that is invalid.
func ErrInvalidKeyName(value, fieldPath string, details ...string) *FieldError {
    return &FieldError{
        Message: fmt.Sprintf("invalid key name %q", value),
        Paths:   []string{fieldPath},
        Details: strings.Join(details, ", "),
    }
}
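A small illustration of how the ViaField/ViaIndex helpers above compose field paths; the "spec.items" names are invented:

package main

import (
    "fmt"

    "github.com/knative/pkg/apis"
)

func main() {
    // A missing "name" on the third element of spec.items: the index is
    // attached first, then folded into the enclosing field path.
    err := apis.ErrMissingField("name").ViaIndex(2).ViaField("spec", "items")
    fmt.Println(err) // missing field(s): spec.items[2].name
}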
37
vendor/github.com/knative/pkg/apis/interfaces.go
generated
vendored
@@ -1,37 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

// Defaultable defines an interface for setting the defaults for the
// uninitialized fields of this instance.
type Defaultable interface {
    SetDefaults()
}

// Validatable indicates that a particular type may have its fields validated.
type Validatable interface {
    // Validate checks the validity of this types fields.
    Validate() *FieldError
}

// Immutable indicates that a particular type has fields that should
// not change after creation.
type Immutable interface {
    // CheckImmutableFields checks that the current instance's immutable
    // fields haven't changed from the provided original.
    CheckImmutableFields(original Immutable) *FieldError
}
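A type opts into these interfaces simply by implementing the methods; a hedged sketch with a hypothetical WidgetSpec:

type WidgetSpec struct {
    Replicas *int32
}

// SetDefaults implements Defaultable.
func (s *WidgetSpec) SetDefaults() {
    if s.Replicas == nil {
        one := int32(1)
        s.Replicas = &one
    }
}

// Validate implements Validatable.
func (s *WidgetSpec) Validate() *apis.FieldError {
    if s.Replicas != nil && *s.Replicas < 0 {
        return apis.ErrInvalidValue(fmt.Sprintf("%d", *s.Replicas), "replicas")
    }
    return nil
}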
42
vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
generated
vendored
@@ -17,8 +17,8 @@ limitations under the License.
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "github.com/knative/pkg/apis/istio/common/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient
@@ -127,7 +127,7 @@ type PolicySpec struct {
    // List rules to select destinations that the policy should be applied on.
    // If empty, policy will be used on all destinations in the same namespace.
    Targets []TargetSelector `json:"targets,omitempty"`

    // List of authentication methods that can be used for peer authentication.
    // They will be evaluated in order; the first validate one will be used to
    // set peer identity (source.user) and other peer attributes. If none of
@@ -135,14 +135,14 @@ type PolicySpec struct {
    // request will be rejected with authentication failed error (401).
    // Leave the list empty if peer authentication is not required
    Peers []PeerAuthenticationMethod `json:"peers,omitempty"`

    // Set this flag to true to accept request (for peer authentication perspective),
    // even when none of the peer authentication methods defined above satisfied.
    // Typically, this is used to delay the rejection decision to next layer (e.g
    // authorization).
    // This flag is ignored if no authentication defined for peer (peers field is empty).
    PeerIsOptional bool `json:"peerIsOptional,omitempty"`

    // List of authentication methods that can be used for origin authentication.
    // Similar to peers, these will be evaluated in order; the first validate one
    // will be used to set origin identity and attributes (i.e request.auth.user,
@@ -151,17 +151,17 @@ type PolicySpec struct {
    // error (401).
    // Leave the list empty if origin authentication is not required.
    Origins []OriginAuthenticationMethod `json:"origins,omitempty"`

    // Set this flag to true to accept request (for origin authentication perspective),
    // even when none of the origin authentication methods defined above satisfied.
    // Typically, this is used to delay the rejection decision to next layer (e.g
    // authorization).
    // This flag is ignored if no authentication defined for origin (origins field is empty).
    OriginIsOptional bool `json:"originIsOptional,omitempty"`

    // Define whether peer or origin identity should be use for principal. Default
    // value is USE_PEER.
    // If peer (or orgin) identity is not available, either because of peer/origin
    // If peer (or origin) identity is not available, either because of peer/origin
    // authentication is not defined, or failed, principal will be left unset.
    // In other words, binding rule does not affect the decision to accept or
    // reject request.
@@ -173,7 +173,7 @@ type TargetSelector struct {
    // REQUIRED. The name must be a short name from the service registry. The
    // fully qualified domain name will be resolved in a platform specific manner.
    Name string `json:"name"`

    // Specifies the ports on the destination. Leave empty to match all ports
    // that are exposed.
    Ports []PortSelector `json:"ports,omitempty"`
@@ -183,12 +183,12 @@ type TargetSelector struct {
// matching targets for authenticationn policy. This is copied from
// networking API to avoid dependency.
type PortSelector struct {
    // It is requred to specify exactly one of the fields:
    // It is required to specify exactly one of the fields:
    // Number or Name

    // Valid port number
    Number uint32 `json:"number,omitempty"`

    // Port name
    Name string `json:"name,omitempty"`
}
@@ -199,11 +199,11 @@ type PortSelector struct {
// The type can be progammatically determine by checking the type of the
// "params" field.
type PeerAuthenticationMethod struct {
    // It is requred to specify exactly one of the fields:
    // It is required to specify exactly one of the fields:
    // Mtls or Jwt
    // Set if mTLS is used.
    Mtls *MutualTls `json:"mtls,omitempty"`

    // Set if JWT is used. This option is not yet available.
    Jwt *Jwt `json:"jwt,omitempty"`
}
@@ -214,7 +214,7 @@ type Mode string
const (
    // Client cert must be presented, connection is in TLS.
    ModeStrict Mode = "STRICT"

    // Connection can be either plaintext or TLS, and client cert can be omitted.
    ModePermissive Mode = "PERMISSIVE"
)
@@ -229,7 +229,7 @@ type MutualTls struct {
    // be left unset.
    // When the flag is false (default), request must have client certificate.
    AllowTls bool `json:"allowTls,omitempty"`

    // Defines the mode of mTLS authentication.
    Mode Mode `json:"mode,omitempty"`
}
@@ -256,7 +256,7 @@ type Jwt struct {
    // Example: https://securetoken.google.com
    // Example: 1234567-compute@developer.gserviceaccount.com
    Issuer string `json:"issuer,omitempty"`

    // The list of JWT
    // [audiences](https://tools.ietf.org/html/rfc7519#section-4.1.3).
    // that are allowed to access. A JWT containing any of these
@@ -272,7 +272,7 @@ type Jwt struct {
    // bookstore_web.apps.googleusercontent.com
    // ```
    Audiences []string `json:"audiences,omitempty"`

    // URL of the provider's public key set to validate signature of the
    // JWT. See [OpenID
    // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
@@ -285,7 +285,7 @@ type Jwt struct {
    //
    // Example: https://www.googleapis.com/oauth2/v1/certs
    JwksUri string `json:"jwksUri,omitempty"`

    // Two fields below define where to extract the JWT from an HTTP request.
    //
    // If no explicit location is specified the following default
@@ -304,7 +304,7 @@ type Jwt struct {
    // For example, if `header=x-goog-iap-jwt-assertion`, the header
    // format will be x-goog-iap-jwt-assertion: <JWT>.
    JwtHeaders []string `json:"jwtHeaders,omitempty"`

    // JWT is sent in a query parameter. `query` represents the
    // query parameter name.
    //
@@ -312,9 +312,9 @@ type Jwt struct {
    JwtParams []string `json:"jwtParams,omitempty"`

    // URL paths that should be excluded from the JWT validation. If the request path is matched,
    // the JWT validation will be skipped and the request will proceed regardless.
    // This is useful to keep a couple of URLs public for external health checks.
    // Example: "/health_check", "/status/cpu_usage".
    // the JWT validation will be skipped and the request will proceed regardless.
    // This is useful to keep a couple of URLs public for external health checks.
    // Example: "/health_check", "/status/cpu_usage".
    ExcludedPaths []v1alpha1.StringMatch `json:"excludedPaths,omitempty"`
}

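A sketch of how these pieces compose into a PolicySpec for permissive mTLS; the target name and port are illustrative only:

spec := v1alpha1.PolicySpec{
    Targets: []v1alpha1.TargetSelector{{
        Name:  "podinfo", // illustrative service name
        Ports: []v1alpha1.PortSelector{{Number: 9898}},
    }},
    Peers: []v1alpha1.PeerAuthenticationMethod{{
        Mtls: &v1alpha1.MutualTls{Mode: v1alpha1.ModePermissive},
    }},
    // Defer the rejection decision to the authorization layer.
    PeerIsOptional: true,
}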
18
vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go
generated
vendored
@@ -19,17 +19,17 @@ package v1alpha1
// Describes how to match a given string in HTTP headers. Match is
// case-sensitive.
type StringMatch struct {
    // Specified exactly one of the fields below.
    // Specified exactly one of the fields below.

    // exact string match
    Exact string `json:"exact,omitempty"`
    // exact string match
    Exact string `json:"exact,omitempty"`

    // prefix-based match
    Prefix string `json:"prefix,omitempty"`
    // prefix-based match
    Prefix string `json:"prefix,omitempty"`

    // suffix-based match.
    Suffix string `json:"prefix,omitempty"`
    // suffix-based match.
    Suffix string `json:"suffix,omitempty"`

    // ECMAscript style regex-based match
    Regex string `json:"regex,omitempty"`
    // ECMAscript style regex-based match
    Regex string `json:"regex,omitempty"`
}

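A short sketch of the oneof semantics, e.g. excluding health-check paths from JWT validation as the Jwt type above suggests; the package aliases and all values are assumptions for illustration:

jwt := authv1alpha1.Jwt{
    Issuer:  "https://securetoken.google.com",
    JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
    ExcludedPaths: []commonv1alpha1.StringMatch{
        {Exact: "/health_check"}, // set exactly one matcher per entry
        {Prefix: "/status/"},
    },
}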
68
vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go
generated
vendored
@@ -117,11 +117,11 @@ type DestinationRuleSpec struct {
    //
    // Note that the host field applies to both HTTP and TCP services.
    Host string `json:"host"`

    // Traffic policies to apply (load balancing policy, connection pool
    // sizes, outlier detection).
    TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"`

    // One or more named sets that represent individual versions of a
    // service. Traffic policies can be overridden at subset level.
    Subsets []Subset `json:"subsets,omitempty"`
@@ -133,16 +133,16 @@ type TrafficPolicy struct {

    // Settings controlling the load balancer algorithms.
    LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"`

    // Settings controlling the volume of connections to an upstream service
    ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"`

    // Settings controlling eviction of unhealthy hosts from the load balancing pool
    OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"`

    // TLS related settings for connections to the upstream service.
    Tls *TLSSettings `json:"tls,omitempty"`

    // Traffic policies specific to individual ports. Note that port level
    // settings will override the destination-level settings. Traffic
    // settings specified at the destination-level will not be inherited when
@@ -161,13 +161,13 @@ type PortTrafficPolicy struct {
    // the same protocol the names should be of the form <protocol-name>-<DNS
    // label>.
    Port PortSelector `json:"port"`

    // Settings controlling the load balancer algorithms.
    LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"`

    // Settings controlling the volume of connections to an upstream service
    ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"`

    // Settings controlling eviction of unhealthy hosts from the load balancing pool
    OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"`

@@ -206,12 +206,12 @@ type PortTrafficPolicy struct {
type Subset struct {
    // REQUIRED. Name of the subset. The service name and the subset name can
    // be used for traffic splitting in a route rule.
    Name string `json:"port"`

    Name string `json:"name"`

    // REQUIRED. Labels apply a filter over the endpoints of a service in the
    // service registry. See route rules for examples of usage.
    Labels map[string]string `json:"labels"`

    // Traffic policies that apply to this subset. Subsets inherit the
    // traffic policies specified at the DestinationRule level. Settings
    // specified at the subset level will override the corresponding settings
@@ -254,7 +254,7 @@ type Subset struct {
// name: user
// ttl: 0s
type LoadBalancerSettings struct {
    // It is requred to specify exactly one of the fields:
    // It is required to specify exactly one of the fields:
    // Simple or ConsistentHash
    Simple SimpleLB `json:"simple,omitempty"`
    ConsistentHash *ConsistentHashLB `json:"consistentHash,omitempty"`
@@ -266,17 +266,17 @@ type SimpleLB string
const (
    // Round Robin policy. Default
    SimpleLBRoundRobin SimpleLB = "ROUND_ROBIN"

    // The least request load balancer uses an O(1) algorithm which selects
    // two random healthy hosts and picks the host which has fewer active
    // requests.
    SimpleLBLeastConn SimpleLB = "LEAST_CONN"

    // The random load balancer selects a random healthy host. The random
    // load balancer generally performs better than round robin if no health
    // checking policy is configured.
    SimpleLBRandom SimpleLB = "RANDOM"

    // This option will forward the connection to the original IP address
    // requested by the caller without doing any form of load
    // balancing. This option must be used with care. It is meant for
@@ -293,17 +293,17 @@ const (
// service.
type ConsistentHashLB struct {

    // It is requred to specify exactly one of the fields as hash key:
    // It is required to specify exactly one of the fields as hash key:
    // HttpHeaderName, HttpCookie, or UseSourceIP.
    // Hash based on a specific HTTP header.
    HttpHeaderName string `json:"httpHeaderName,omitempty"`

    // Hash based on HTTP cookie.
    HttpCookie *HTTPCookie `json:"httpCookie,omitempty"`

    // Hash based on the source IP address.
    UseSourceIp bool `json:"useSourceIp,omitempty"`

    // The minimum number of virtual nodes to use for the hash
    // ring. Defaults to 1024. Larger ring sizes result in more granular
    // load distributions. If the number of hosts in the load balancing
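A sketch of the oneof constraint in LoadBalancerSettings, hashing on a request header; the header name is illustrative:

lb := &v1alpha3.LoadBalancerSettings{
    // Exactly one of Simple or ConsistentHash may be set.
    ConsistentHash: &v1alpha3.ConsistentHashLB{
        HttpHeaderName: "x-user-id", // illustrative header
    },
}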
@@ -359,7 +359,7 @@ type ConnectionPoolSettings struct {
type TCPSettings struct {
    // Maximum number of HTTP1 /TCP connections to a destination host.
    MaxConnections int32 `json:"maxConnections,omitempty"`

    // TCP connection timeout.
    ConnectTimeout string `json:"connectTimeout,omitempty"`
}
@@ -368,14 +368,14 @@ type TCPSettings struct {
type HTTPSettings struct {
    // Maximum number of pending HTTP requests to a destination. Default 1024.
    Http1MaxPendingRequests int32 `json:"http1MaxPendingRequests,omitempty"`

    // Maximum number of requests to a backend. Default 1024.
    Http2MaxRequests int32 `json:"http2MaxRequests,omitempty"`

    // Maximum number of requests per connection to a backend. Setting this
    // parameter to 1 disables keep alive.
    MaxRequestsPerConnection int32 `json:"maxRequestsPerConnection,omitempty"`

    // Maximum number of retries that can be outstanding to all hosts in a
    // cluster at a given time. Defaults to 3.
    MaxRetries int32 `json:"maxRetries,omitempty"`
@@ -421,18 +421,18 @@ type OutlierDetection struct {
    // accessed over an opaque TCP connection, connect timeouts and
    // connection error/failure events qualify as an error.
    ConsecutiveErrors int32 `json:"consecutiveErrors,omitempty"`

    // Time interval between ejection sweep analysis. format:
    // 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.
    Interval string `json:"interval,omitempty"`

    // Minimum ejection duration. A host will remain ejected for a period
    // equal to the product of minimum ejection duration and the number of
    // times the host has been ejected. This technique allows the system to
    // automatically increase the ejection period for unhealthy upstream
    // servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s.
    BaseEjectionTime string `json:"baseEjectionTime,omitempty"`

    // Maximum % of hosts in the load balancing pool for the upstream
    // service that can be ejected. Defaults to 10%.
    MaxEjectionPercent int32 `json:"maxEjectionPercent,omitempty"`
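The ejection arithmetic is multiplicative: with a BaseEjectionTime of 30s, a host ejected for the third time stays out for 3 x 30s = 90s. A sketch with illustrative thresholds:

od := &v1alpha3.OutlierDetection{
    ConsecutiveErrors:  5,     // eject after 5 consecutive errors
    Interval:           "10s", // sweep every 10 seconds
    BaseEjectionTime:   "30s", // third ejection lasts 90s
    MaxEjectionPercent: 50,    // never eject more than half the pool
}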
@@ -488,29 +488,29 @@ type TLSSettings struct {
    // REQUIRED: Indicates whether connections to this port should be secured
    // using TLS. The value of this field determines how TLS is enforced.
    Mode TLSmode `json:"mode"`

    // REQUIRED if mode is `MUTUAL`. The path to the file holding the
    // client-side TLS certificate to use.
    // Should be empty if mode is `ISTIO_MUTUAL`.
    ClientCertificate string `json:"clientCertificate,omitempty"`

    // REQUIRED if mode is `MUTUAL`. The path to the file holding the
    // client's private key.
    // Should be empty if mode is `ISTIO_MUTUAL`.
    PrivateKey string `json:"privateKey,omitempty"`

    // OPTIONAL: The path to the file containing certificate authority
    // certificates to use in verifying a presented server certificate. If
    // omitted, the proxy will not verify the server's certificate.
    // Should be empty if mode is `ISTIO_MUTUAL`.
    CaCertificates string `json:"caCertificates,omitempty"`

    // A list of alternate names to verify the subject identity in the
    // certificate. If specified, the proxy will verify that the server
    // certificate's subject alt name matches one of the specified values.
    // Should be empty if mode is `ISTIO_MUTUAL`.
    SubjectAltNames []string `json:"subjectAltNames,omitempty"`

    // SNI string to present to the server during TLS handshake.
    // Should be empty if mode is `ISTIO_MUTUAL`.
    Sni string `json:"sni,omitempty"`
@@ -525,11 +525,11 @@ const (

    // Originate a TLS connection to the upstream endpoint.
    TLSmodeSimple TLSmode = "SIMPLE"

    // Secure connections to the upstream using mutual TLS by presenting
    // client certificates for authentication.
    TLSmodeMutual TLSmode = "MUTUAL"

    // Secure connections to the upstream using mutual TLS by presenting
    // client certificates for authentication.
    // Compared to Mutual mode, this mode uses certificates generated

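A sketch of MUTUAL-mode settings; all file paths and the SAN are placeholders:

tls := &v1alpha3.TLSSettings{
    Mode:              v1alpha3.TLSmodeMutual,
    ClientCertificate: "/etc/certs/client.pem", // placeholder
    PrivateKey:        "/etc/certs/key.pem",    // placeholder
    CaCertificates:    "/etc/certs/ca.pem",     // placeholder
    SubjectAltNames:   []string{"backend.default.svc.cluster.local"},
}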
2
vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
generated
vendored
@@ -17,8 +17,8 @@ limitations under the License.
package v1alpha3

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "github.com/knative/pkg/apis/istio/common/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient

46
vendor/github.com/knative/pkg/apis/volatile_time.go
generated
vendored
@@ -1,46 +0,0 @@
/*
Copyright 2018 The Knative Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

import (
    "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// VolatileTime wraps metav1.Time
type VolatileTime struct {
    Inner metav1.Time
}

// MarshalJSON implements the json.Marshaler interface.
func (t VolatileTime) MarshalJSON() ([]byte, error) {
    return t.Inner.MarshalJSON()
}

// UnmarshalJSON implements the json.Unmarshaller interface.
func (t *VolatileTime) UnmarshalJSON(b []byte) error {
    return t.Inner.UnmarshalJSON(b)
}

func init() {
    equality.Semantic.AddFunc(
        // Always treat VolatileTime fields as equivalent.
        func(a, b VolatileTime) bool {
            return true
        },
    )
}
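Because of the init hook above, semantic comparison ignores VolatileTime differences; a minimal sketch:

a := apis.VolatileTime{Inner: metav1.Now()}
b := apis.VolatileTime{Inner: metav1.NewTime(time.Unix(0, 0))}
// equality.Semantic consults the registered func, so the two compare equal.
fmt.Println(equality.Semantic.DeepEqual(a, b)) // true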
38
vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go
generated
vendored
@@ -1,38 +0,0 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package apis

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolatileTime) DeepCopyInto(out *VolatileTime) {
    *out = *in
    in.Inner.DeepCopyInto(&out.Inner)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolatileTime.
func (in *VolatileTime) DeepCopy() *VolatileTime {
    if in == nil {
        return nil
    }
    out := new(VolatileTime)
    in.DeepCopyInto(out)
    return out
}
22
vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
generated
vendored
@@ -20,7 +20,6 @@ package versioned
import (
    authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
    duckv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/duck/v1alpha1"
    networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
    discovery "k8s.io/client-go/discovery"
    rest "k8s.io/client-go/rest"
@@ -32,9 +31,6 @@ type Interface interface {
    AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface
    // Deprecated: please explicitly pick a version if possible.
    Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface
    DuckV1alpha1() duckv1alpha1.DuckV1alpha1Interface
    // Deprecated: please explicitly pick a version if possible.
    Duck() duckv1alpha1.DuckV1alpha1Interface
    NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
    // Deprecated: please explicitly pick a version if possible.
    Networking() networkingv1alpha3.NetworkingV1alpha3Interface
@@ -45,7 +41,6 @@ type Interface interface {
type Clientset struct {
    *discovery.DiscoveryClient
    authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client
    duckV1alpha1           *duckv1alpha1.DuckV1alpha1Client
    networkingV1alpha3     *networkingv1alpha3.NetworkingV1alpha3Client
}

@@ -60,17 +55,6 @@ func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alph
    return c.authenticationV1alpha1
}

// DuckV1alpha1 retrieves the DuckV1alpha1Client
func (c *Clientset) DuckV1alpha1() duckv1alpha1.DuckV1alpha1Interface {
    return c.duckV1alpha1
}

// Deprecated: Duck retrieves the default version of DuckClient.
// Please explicitly pick a version.
func (c *Clientset) Duck() duckv1alpha1.DuckV1alpha1Interface {
    return c.duckV1alpha1
}

// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
    return c.networkingV1alpha3
@@ -102,10 +86,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
    if err != nil {
        return nil, err
    }
    cs.duckV1alpha1, err = duckv1alpha1.NewForConfig(&configShallowCopy)
    if err != nil {
        return nil, err
    }
    cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy)
    if err != nil {
        return nil, err
@@ -123,7 +103,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
func NewForConfigOrDie(c *rest.Config) *Clientset {
    var cs Clientset
    cs.authenticationV1alpha1 = authenticationv1alpha1.NewForConfigOrDie(c)
    cs.duckV1alpha1 = duckv1alpha1.NewForConfigOrDie(c)
    cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)

    cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
@@ -134,7 +113,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
func New(c rest.Interface) *Clientset {
    var cs Clientset
    cs.authenticationV1alpha1 = authenticationv1alpha1.New(c)
    cs.duckV1alpha1 = duckv1alpha1.New(c)
    cs.networkingV1alpha3 = networkingv1alpha3.New(c)

    cs.DiscoveryClient = discovery.NewDiscoveryClient(c)

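For reference, the generated clientset is typically constructed from a rest.Config; a hedged sketch using clientcmd, where the kubeconfig path and resource name are placeholders and the Get signature assumes the client-go vintage vendored here:

cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
if err != nil {
    log.Fatal(err)
}
cs, err := versioned.NewForConfig(cfg)
if err != nil {
    log.Fatal(err)
}
vs, err := cs.NetworkingV1alpha3().VirtualServices("default").
    Get("podinfo", metav1.GetOptions{}) // placeholder resource name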
12
vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go
generated
vendored
@@ -22,8 +22,6 @@ import (
    clientset "github.com/knative/pkg/client/clientset/versioned"
    authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
    fakeauthenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake"
    duckv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/duck/v1alpha1"
    fakeduckv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/duck/v1alpha1/fake"
    networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
    fakenetworkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
    "k8s.io/apimachinery/pkg/runtime"
@@ -85,16 +83,6 @@ func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alph
    return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake}
}

// DuckV1alpha1 retrieves the DuckV1alpha1Client
func (c *Clientset) DuckV1alpha1() duckv1alpha1.DuckV1alpha1Interface {
    return &fakeduckv1alpha1.FakeDuckV1alpha1{Fake: &c.Fake}
}

// Duck retrieves the DuckV1alpha1Client
func (c *Clientset) Duck() duckv1alpha1.DuckV1alpha1Interface {
    return &fakeduckv1alpha1.FakeDuckV1alpha1{Fake: &c.Fake}
}

// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
    return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}

2
vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go
generated
vendored
@@ -19,7 +19,6 @@ limitations under the License.
package fake

import (
    duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
    authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
    networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -53,6 +52,5 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    authenticationv1alpha1.AddToScheme(scheme)
    duckv1alpha1.AddToScheme(scheme)
    networkingv1alpha3.AddToScheme(scheme)
}

2
vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
generated
vendored
@@ -19,7 +19,6 @@ limitations under the License.
package scheme

import (
    duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
    authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
    networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -53,6 +52,5 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    authenticationv1alpha1.AddToScheme(scheme)
    duckv1alpha1.AddToScheme(scheme)
    networkingv1alpha3.AddToScheme(scheme)
}

20
vendor/github.com/knative/pkg/client/clientset/versioned/typed/duck/v1alpha1/doc.go
generated
vendored
@@ -1,20 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

// This package has the automatically generated typed clients.
package v1alpha1
105
vendor/github.com/knative/pkg/client/clientset/versioned/typed/duck/v1alpha1/duck_client.go
generated
vendored
@@ -1,105 +0,0 @@
/*
Copyright 2018 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package v1alpha1

import (
    v1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
    "github.com/knative/pkg/client/clientset/versioned/scheme"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    rest "k8s.io/client-go/rest"
)

type DuckV1alpha1Interface interface {
    RESTClient() rest.Interface
    GenerationalsGetter
    KResourcesGetter
    TargetsGetter
    TopicsGetter
}

// DuckV1alpha1Client is used to interact with features provided by the duck.knative.dev group.
type DuckV1alpha1Client struct {
    restClient rest.Interface
}

func (c *DuckV1alpha1Client) Generationals(namespace string) GenerationalInterface {
    return newGenerationals(c, namespace)
}

func (c *DuckV1alpha1Client) KResources(namespace string) KResourceInterface {
    return newKResources(c, namespace)
}

func (c *DuckV1alpha1Client) Targets(namespace string) TargetInterface {
    return newTargets(c, namespace)
}

func (c *DuckV1alpha1Client) Topics(namespace string) TopicInterface {
    return newTopics(c, namespace)
}

// NewForConfig creates a new DuckV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*DuckV1alpha1Client, error) {
    config := *c
    if err := setConfigDefaults(&config); err != nil {
        return nil, err
    }
    client, err := rest.RESTClientFor(&config)
    if err != nil {
        return nil, err
    }
    return &DuckV1alpha1Client{client}, nil
}

// NewForConfigOrDie creates a new DuckV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *DuckV1alpha1Client {
    client, err := NewForConfig(c)
    if err != nil {
        panic(err)
    }
    return client
}

// New creates a new DuckV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *DuckV1alpha1Client {
    return &DuckV1alpha1Client{c}
}

func setConfigDefaults(config *rest.Config) error {
    gv := v1alpha1.SchemeGroupVersion
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}

    if config.UserAgent == "" {
        config.UserAgent = rest.DefaultKubernetesUserAgent()
    }

    return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *DuckV1alpha1Client) RESTClient() rest.Interface {
    if c == nil {
        return nil
    }
    return c.restClient
}