# Compare commits

369 commits (SHA1 only):

```text
3492b07d9a d0b582048f a82eb7b01f cd08afcbeb 331942a4ed 4e387fa943
15484363d6 50b7b74480 adb53c63dd bdc3a32e96 65f716182b 6ef72e2550
60f51ad7d5 a09dc2cbd8 825d07aa54 f46882c778 663fa08cc1 19e625d38e
edcff9cd15 e0fc5ecb39 4ac6629969 68d8dad7c8 4ab9ceafc1 352ed898d4
e091d6a50d c651ef00c9 4b17788a77 db673dddd9 88ad457e87 126b68559e
2cd3fe47e6 15eb7cce55 13f923aabf 0ffb112063 b4ea6af110 611c8f7374
1cc73f37e7 ca37fc0eb5 5380624da9 aaece0bd44 de7cc17f5d 66efa39d27
ff7c0a105d 7b29253df4 7ef63b341e e7bfaa4f1a 3a9a408941 3e43963daa
69a6e260f5 664e7ad555 ee4a009a06 36dfd4dd35 dbf36082b2 3a1018cff6
fc10745a1a 347cfd06de ec759ce467 f211e0fe31 c91a128b65 6a080f3032
b2c12c1131 b945b37089 9a5529a0aa 025785389d 48d9a0dede fbdf38e990
ef5bf70386 274c1469b4 960d506360 79a6421178 8b5c004860 f54768772e
b9075dc6f9 107596ad54 3c6a2b1508 f996cba354 bdd864fbdd ca074ef13f
ddd3a8251e f5b862dc1b d45d475f61 d0f72ea3fa 5ed5d1e5b6 311b14026e
67cd722b54 7f6247eb7b f3fd515521 9db5dd0d7f d07925d79d 662d0f31ff
2c5ad0bf8f 0ea76b986a 3c4253c336 77ba28e91c 6399e7586c 1caa62adc8
8fa558f124 191228633b 8a981f935a a8ea9adbcc 685d94c44b 153ed1b044
7788f3a1ba cd99225f9b 33ba3b8d4a d222dd1069 39fd3d46ba 419c1804b6
ae0351ddad 941be15762 578ebcf6ed 27ab4b08f9 428b2208ba 438c553d60
90cb293182 1f9f93ebe4 f5b97fbb74 ce79244126 3af5d767d8 3ce3efd2f2
8108edea31 5d80087ab3 6593be584d a0f63f858f 49914f3bd5 71988a8b98
d65be6ef58 38c40d02e7 9e071b9d60 b4ae060122 436656e81b d7e111b7d4
4b6126dd1a 89faa70196 6ed9d4a1db 9d0e38c2e1 8b758fd616 14369d8be3
7b4153113e 7d340c5e61 337c94376d 0ef1d0b2f1 5cf67bd4e0 f22be17852
48e79d5dd4 59f5a0654a 6da2e11683 802c087a4b ed2048e9f3 437b1d30c0
ba1788cbc5 773094a20d 5aa39106a0 a9167801ba 62f4a6cb96 ea2b41e96e
d28ce650e9 1bfcdba499 e48faa9144 33fbe99561 989925b484 7dd66559e7
2ef1c5608e b5932e8905 37999d3250 83985ae482 3adfcc837e c720fee3ab
881387e522 d9f3378e29 ba87620225 1cd0c49872 12ac96deeb 17e6f35785
bd115633a3 86ea172380 d87bbbbc1e 6196f69f4d be31bcf22f cba2135c69
2e52573499 b2ce1ed1fb 77a485af74 d8b847a973 e80a3d3232 780ba82385
6ba69dce0a 3c7a561db8 49c942bea0 bf1ca293dc 62b906d30b 65bf048189
a498ed8200 9f12bbcd98 fcd520787d e2417e4e40 70a2cbf1c6 fa0c6af6aa
4f1abd0c8d 41e839aa36 2fd1593ad2 27b601c5aa 5fc69134e3 9adc0698bb
119c2ff464 f3a4201c7d 8b6aa73df0 1d4dfb0883 eab7f126a6 fe7547d83e
7d0df82861 7f0cd27591 e094c2ae14 a5d438257f d8cb8f1064 a8d8bb2d6f
a76ea5917c b0b6198ec8 eda97f35d2 2b6507d35a f7c4d5aa0b 74f07cffa6
79c8ff0af8 ac544eea4b 231a32331b 104e8ef050 296015faff 9a9964c968
0d05d86e32 9680ca98f2 42b850ca52 3f5c22d863 535a92e871 3411a6a981
b5adee271c e2abcd1323 25fbe7ecb6 6befee79c2 f09c5a60f1 52e89ff509
35e20406ef c6e96ff1bb 793ab524b0 5a479d0187 a23e4f1d2a bd35a3f61c
197e987d5f 7f29beb639 1140af8dc7 a2688c3910 75b27ab3f3 59d3f55fb2
f34739f334 90c71ec18f 395234d7c8 e322ba0065 6db8b96f72 44d7e96e96
1662479c8d 2e351fcf0d 5d81876d07 c81e6989ec 4d61a896c3 d148933ab3
04a56a3591 4a354e74d4 1e3e6427d5 38826108c8 4c4752f907 94dcd6c94d
eabef3db30 6750f10ffa 56cb888cbf b3e7fb3417 2c6e1baca2 c8358929d1
1dc7677dfb 8e699a7543 cbbabdfac0 9d92de234c ba65975fb5 ef423b2078
f451b4e36c 0856e13ee6 87b9fa8ca7 5b43d3d314 ac4972dd8d 8a8f68af5d
c669dc0c4b 863a5466cc e2347c84e3 e0e673f565 30cbf2a741 f58de3801c
7c6b88d4c1 0c0ebaecd5 1925f99118 6f2a22a1cc ee04082cd7 efd901ac3a
e565789ae8 d3953004f6 df1d9e3011 631c55fa6e 29cdd43288 9b79af9fcd
2c9c1adb47 5dfb5808c4 bb0175aebf adaf4c99c0 bed6ed09d5 4ff67a85ce
702f4fcd14 8a03ae153d 434c6149ab 97fc4a90ae 217ef06930 71057946e6
a74ad52c72 12d26874f8 27de9ce151 9e7cd5a8c5 38cb487b64 05ca266c5e
5cc26de645 2b9a195fa3 4454749eec b435a03fab 7c166e2b40 f7a7963dcf
9c77c0d69c e8a9555346 59751dd007 9c4d4d16b6 0e3d1b3e8f f119b78940
456d914c35 737507b0fe 4bcf82d295 e9cd7afc8a 0830abd51d 5b296e01b3
3fd039afd1 5904348ba5 1a98e93723 c9685fbd13 dc347e273d 8170916897
71cd4e0cb7 0109788ccc 1649dea468 b8a7ea8534 afe4d59d5a 0f2697df23
05664fa648 3b2564f34b dd0cf2d588 7c66f23c6a a9f034de1a 6ad2dca57a
e8353c110b dbf26ddf53 acc72d207f a784f83464 07d8355363 f7a439274e
bd6d446cb8 385d0e0549 02236374d8
```
## Files changed

#### `.circleci/config.yml` — new file (+22)

```diff
@@ -0,0 +1,22 @@
+version: 2.1
+jobs:
+  e2e-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-istio.sh
+      - run: test/e2e-build.sh
+      - run: test/e2e-tests.sh
+
+workflows:
+  version: 2
+  build-and-test:
+    jobs:
+      - e2e-testing:
+          filters:
+            branches:
+              ignore:
+                - /gh-pages.*/
+                - /docs-.*/
+                - /release-.*/
```
```diff
@@ -6,3 +6,6 @@ coverage:
         threshold: 50
         base: auto
     patch: off
+
+comment:
+  require_changes: yes
```
#### `.github/CODEOWNERS` — vendored, new file (+1)

```diff
@@ -0,0 +1 @@
+* @stefanprodan
```
#### `.github/main.workflow` — vendored, new file (+17)

```diff
@@ -0,0 +1,17 @@
+workflow "Publish Helm charts" {
+  on = "push"
+  resolves = ["helm-push"]
+}
+
+action "helm-lint" {
+  uses = "stefanprodan/gh-actions/helm@master"
+  args = ["lint charts/*"]
+}
+
+action "helm-push" {
+  needs = ["helm-lint"]
+  uses = "stefanprodan/gh-actions/helm-gh-pages@master"
+  args = ["charts/*","https://flagger.app"]
+  secrets = ["GITHUB_TOKEN"]
+}
+
```
#### `.gitignore` — vendored (+4)

```diff
@@ -11,3 +11,7 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 .DS_Store
+
+bin/
+artifacts/gcloud/
+.idea
```
```diff
@@ -1,7 +1,7 @@
 builds:
   - main: ./cmd/flagger
     binary: flagger
-    ldflags: -s -w -X github.com/stefanprodan/flagger/pkg/version.REVISION={{.Commit}}
+    ldflags: -s -w -X github.com/weaveworks/flagger/pkg/version.REVISION={{.Commit}}
     goos:
       - linux
     goarch:
```
#### `.travis.yml` (±29)

```diff
@@ -1,8 +1,13 @@
 sudo: required
 language: go
 
+branches:
+  except:
+    - /gh-pages.*/
+    - /docs-.*/
+
 go:
-  - 1.11.x
+  - 1.12.x
 
 services:
   - docker
@@ -13,26 +18,26 @@ addons:
     - docker-ce
 
 script:
   - set -e
-  - make test-fmt
-  - make test-codegen
-  - go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
-  - make build
+  - make test-fmt
+  - make test-codegen
+  - go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
+  - make build
 
 after_success:
   - if [ -z "$DOCKER_USER" ]; then
       echo "PR build, skipping image push";
     else
-      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
-      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
-      docker push quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
+      BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
+      docker tag weaveworks/flagger:latest weaveworks/flagger:${BRANCH_COMMIT};
+      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
+      docker push weaveworks/flagger:${BRANCH_COMMIT};
     fi
  - if [ -z "$TRAVIS_TAG" ]; then
      echo "Not a release, skipping image push";
    else
-     docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
-     echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
-     docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
+     docker tag weaveworks/flagger:latest weaveworks/flagger:${TRAVIS_TAG};
+     echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
+     docker push weaveworks/flagger:$TRAVIS_TAG;
    fi
  - bash <(curl -s https://codecov.io/bash)
  - rm coverage.txt
```
#### `CHANGELOG.md` — new file (+229)

```markdown
# Changelog

All notable changes to this project are documented in this file.

## 0.11.0 (2019-04-17)

Adds pre/post rollout [webhooks](https://docs.flagger.app/how-it-works#webhooks)

#### Features

- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/weaveworks/flagger/pull/147)

#### Improvements

- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/weaveworks/flagger/pull/146)
- Make the pod selector label configurable [#148](https://github.com/weaveworks/flagger/pull/148)

#### Breaking changes

- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/weaveworks/flagger/pull/141)

## 0.10.0 (2019-03-27)

Adds support for App Mesh

#### Features

- AWS App Mesh integration
  [#107](https://github.com/weaveworks/flagger/pull/107)
  [#123](https://github.com/weaveworks/flagger/pull/123)

#### Improvements

- Reconcile Kubernetes ClusterIP services [#122](https://github.com/weaveworks/flagger/pull/122)

#### Fixes

- Preserve pod labels on canary promotion [#105](https://github.com/weaveworks/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/weaveworks/flagger/pull/121)

## 0.9.0 (2019-03-11)

Allows A/B testing scenarios where instead of weighted routing, the traffic is split between the
primary and canary based on HTTP headers or cookies.

#### Features

- A/B testing - canary with session affinity [#88](https://github.com/weaveworks/flagger/pull/88)

#### Fixes

- Update the analysis interval when the custom resource changes [#91](https://github.com/weaveworks/flagger/pull/91)

## 0.8.0 (2019-03-06)

Adds support for CORS policy and HTTP request headers manipulation

#### Features

- CORS policy support [#83](https://github.com/weaveworks/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/weaveworks/flagger/pull/82)

#### Improvements

- Refactor the routing management
  [#72](https://github.com/weaveworks/flagger/pull/72)
  [#80](https://github.com/weaveworks/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/weaveworks/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/weaveworks/flagger/pull/78)

## 0.7.0 (2019-02-28)

Adds support for custom metric checks, HTTP timeouts and HTTP retries

#### Features

- Allow custom promql queries in the canary analysis spec [#60](https://github.com/weaveworks/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/weaveworks/flagger/pull/62)

## 0.6.0 (2019-02-25)

Allows for [HTTPMatchRequests](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPMatchRequest)
and [HTTPRewrite](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPRewrite)
to be customized in the service spec of the canary custom resource.

#### Features

- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/weaveworks/flagger/pull/55)
- Update virtual service when the canary service spec changes
  [#54](https://github.com/weaveworks/flagger/pull/54)
  [#51](https://github.com/weaveworks/flagger/pull/51)

#### Improvements

- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
  [#53](https://github.com/weaveworks/flagger/pull/53)

## 0.5.1 (2019-02-14)

Allows skipping the analysis phase to ship changes directly to production

#### Features

- Add option to skip the canary analysis [#46](https://github.com/weaveworks/flagger/pull/46)

#### Fixes

- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/weaveworks/flagger/pull/43)

## 0.5.0 (2019-01-30)

Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flagger/pull/37)

#### Features

- Promote configmaps and secrets changes from canary to primary
- Detect changes in configmaps and/or secrets and (re)start canary analysis
- Add configs checksum to Canary CRD status
- Create primary configmaps and secrets at bootstrap
- Scan canary volumes and containers for configmaps and secrets

#### Fixes

- Copy deployment labels from canary to primary at bootstrap and promotion

## 0.4.1 (2019-01-24)

Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)

#### Features

- Add the load tester chart to Flagger Helm repository
- Implement a load test runner based on [rakyll/hey](https://github.com/rakyll/hey)
- Log warning when no values are found for Istio metric due to lack of traffic

#### Fixes

- Run webhooks before the metrics checks to avoid failures when using a load tester

## 0.4.0 (2019-01-18)

Restart canary analysis if revision changes [#31](https://github.com/weaveworks/flagger/pull/31)

#### Breaking changes

- Drop support for Kubernetes 1.10

#### Features

- Detect changes during canary analysis and reset advancement
- Add status and additional printer columns to CRD
- Add canary name and namespace to controller structured logs

#### Fixes

- Allow canary name to be different to the target name
- Check if multiple canaries have the same target and log error
- Use deep copy when updating Kubernetes objects
- Skip readiness checks if canary analysis has finished

## 0.3.0 (2019-01-11)

Configurable canary analysis duration [#20](https://github.com/weaveworks/flagger/pull/20)

#### Breaking changes

- Helm chart: flag `controlLoopInterval` has been removed

#### Features

- CRD: canaries.flagger.app v1alpha3
- Schedule canary analysis independently based on `canaryAnalysis.interval`
- Add analysis interval to Canary CRD (defaults to one minute)
- Make autoscaler (HPA) reference optional

## 0.2.0 (2019-01-04)

Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)

#### Features

- CRD: canaries.flagger.app v1alpha2
- Implement canary external checks based on webhooks HTTP POST calls
- Add webhooks to Canary CRD
- Move docs to gitbook [docs.flagger.app](https://docs.flagger.app)

## 0.1.2 (2018-12-06)

Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)

#### Features

- Add canary analysis metadata to init and start Slack messages
- Add rollback reason to failed canary Slack messages

## 0.1.1 (2018-11-28)

Canary progress deadline [#10](https://github.com/weaveworks/flagger/pull/10)

#### Features

- Rollback canary based on the deployment progress deadline check
- Add progress deadline to Canary CRD (defaults to 10 minutes)

## 0.1.0 (2018-11-25)

First stable release

#### Features

- CRD: canaries.flagger.app v1alpha1
- Notifications: post canary events to Slack
- Instrumentation: expose Prometheus metrics for canary status and traffic weight percentage
- Autoscaling: add HPA reference to CRD and create primary HPA at bootstrap
- Bootstrap: create primary deployment, ClusterIP services and Istio virtual service based on CRD spec

## 0.0.1 (2018-10-07)

Initial semver release

#### Features

- Implement canary rollback based on failed checks threshold
- Scale up the deployment when canary revision changes
- Add OpenAPI v3 schema validation to Canary CRD
- Use CRD status for canary state persistence
- Add Helm charts for Flagger and Grafana
- Add canary analysis Grafana dashboard
```
#### `Dockerfile` (±12)

```diff
@@ -1,17 +1,17 @@
-FROM golang:1.11
+FROM golang:1.12
 
-RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
+RUN mkdir -p /go/src/github.com/weaveworks/flagger/
 
-WORKDIR /go/src/github.com/stefanprodan/flagger
+WORKDIR /go/src/github.com/weaveworks/flagger
 
 COPY . .
 
 RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
   CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
-  -X github.com/stefanprodan/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
+  -X github.com/weaveworks/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
   -a -installsuffix cgo -o flagger ./cmd/flagger/*
 
-FROM alpine:3.8
+FROM alpine:3.9
 
 RUN addgroup -S flagger \
   && adduser -S -g flagger flagger \
@@ -19,7 +19,7 @@ RUN addgroup -S flagger \
 
 WORKDIR /home/flagger
 
-COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .
+COPY --from=0 /go/src/github.com/weaveworks/flagger/flagger .
 
 RUN chown -R flagger:flagger ./
```
#### `Dockerfile.loadtester` — new file (+30)

```diff
@@ -0,0 +1,30 @@
+FROM golang:1.12 AS builder
+
+RUN mkdir -p /go/src/github.com/weaveworks/flagger/
+
+WORKDIR /go/src/github.com/weaveworks/flagger
+
+COPY . .
+
+RUN go test -race ./pkg/loadtester/
+
+RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*
+
+FROM bats/bats:v1.1.0
+
+RUN addgroup -S app \
+    && adduser -S -g app app \
+    && apk --no-cache add ca-certificates curl jq
+
+WORKDIR /home/app
+
+RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
+    chmod +x hey && mv hey /usr/local/bin/hey
+
+COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester .
+
+RUN chown -R app:app ./
+
+USER app
+
+ENTRYPOINT ["./loadtester"]
```
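Since the final stage bundles only `hey`, `bats`, `curl`, `jq` and the `loadtester` binary, the image can be smoke-tested locally. A hedged sketch — the published port, image tag, and webhook path are illustrative assumptions, not part of this diff; the `metadata.cmd` payload shape matches the load-test webhook shown later in the README diff:

```bash
# build the load tester image from the repo root (tag is illustrative)
docker build -t flagger-loadtester:dev -f Dockerfile.loadtester .

# run it locally; the port mapping assumes the default listener is 8080
docker run -d --name loadtester -p 8080:8080 flagger-loadtester:dev

# simulate Flagger's load-test webhook: the tester executes metadata.cmd,
# here a one-minute hey run against a hypothetical target
curl -sS -X POST localhost:8080/ \
  -d '{"name":"podinfo","namespace":"test","metadata":{"cmd":"hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"}}'
```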
#### `Gopkg.lock` — generated (±169)

```diff
@@ -2,12 +2,12 @@
 
 [[projects]]
-  digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
+  digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663"
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
   pruneopts = "NUT"
-  revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
-  version = "v0.28.0"
+  revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
+  version = "v0.36.0"
 
 [[projects]]
   branch = "master"
@@ -34,15 +34,15 @@
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
+  digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
   name = "github.com/gogo/protobuf"
   packages = [
     "proto",
     "sortkeys",
   ]
   pruneopts = "NUT"
-  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
-  version = "v1.1.1"
+  revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
+  version = "v1.2.1"
 
 [[projects]]
   branch = "master"
@@ -55,14 +55,14 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
+  digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
   name = "github.com/golang/groupcache"
   packages = ["lru"]
   pruneopts = "NUT"
-  revision = "24b0969c4cb722950103eed87108c8d291a8df00"
+  revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
 
 [[projects]]
-  digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
+  digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
@@ -72,8 +72,8 @@
     "ptypes/timestamp",
   ]
   pruneopts = "NUT"
-  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
-  version = "v1.2.0"
+  revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f"
+  version = "v1.3.0"
 
 [[projects]]
   branch = "master"
@@ -119,33 +119,41 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
+  digest = "1:a86d65bc23eea505cd9139178e4d889733928fe165c7a008f41eaab039edf9df"
   name = "github.com/gregjones/httpcache"
   packages = [
     ".",
     "diskcache",
   ]
   pruneopts = "NUT"
-  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
+  revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
 
 [[projects]]
-  digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
+  digest = "1:7313d6b9095eb86581402557bbf3871620cf82adf41853c5b9bee04b894290c7"
+  name = "github.com/h2non/parth"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "b4df798d65426f8c8ab5ca5f9987aec5575d26c9"
+  version = "v2.0.1"
+
+[[projects]]
+  digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
   name = "github.com/hashicorp/golang-lru"
   packages = [
     ".",
     "simplelru",
   ]
   pruneopts = "NUT"
-  revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
-  version = "v0.5.0"
+  revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
+  version = "v0.5.1"
 
 [[projects]]
-  digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
+  digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
   name = "github.com/imdario/mergo"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
-  version = "v0.3.6"
+  revision = "7c29201646fa3de8506f701213473dd407f19646"
+  version = "v0.3.7"
 
 [[projects]]
   branch = "master"
@@ -162,32 +170,6 @@
   pruneopts = "NUT"
   revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
 
-[[projects]]
-  digest = "1:03a74b0d86021c8269b52b7c908eb9bb3852ff590b363dad0a807cf58cec2f89"
-  name = "github.com/knative/pkg"
-  packages = [
-    "apis",
-    "apis/duck",
-    "apis/duck/v1alpha1",
-    "apis/istio",
-    "apis/istio/authentication",
-    "apis/istio/authentication/v1alpha1",
-    "apis/istio/common/v1alpha1",
-    "apis/istio/v1alpha3",
-    "client/clientset/versioned",
-    "client/clientset/versioned/fake",
-    "client/clientset/versioned/scheme",
-    "client/clientset/versioned/typed/authentication/v1alpha1",
-    "client/clientset/versioned/typed/authentication/v1alpha1/fake",
-    "client/clientset/versioned/typed/duck/v1alpha1",
-    "client/clientset/versioned/typed/duck/v1alpha1/fake",
-    "client/clientset/versioned/typed/istio/v1alpha3",
-    "client/clientset/versioned/typed/istio/v1alpha3/fake",
-    "signals",
-  ]
-  pruneopts = "NUT"
-  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
-
 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
   name = "github.com/matttproud/golang_protobuf_extensions"
@@ -245,11 +227,10 @@
   name = "github.com/prometheus/client_model"
   packages = ["go"]
   pruneopts = "NUT"
-  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
+  revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
 
 [[projects]]
   branch = "master"
-  digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
+  digest = "1:4e776079b966091d3e6e12ed2aaf728bea5cd1175ef88bb654e03adbf5d4f5d3"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -257,28 +238,30 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
+  revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
+  version = "v0.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
+  digest = "1:0a2e604afa3cbf53a1ddade2f240ee8472eded98856dd8c7cfbfea392ddbbfc7"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
     "internal/util",
     "iostats",
     "nfs",
     "xfs",
   ]
   pruneopts = "NUT"
-  revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"
+  revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02"
 
 [[projects]]
-  digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
+  digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
   name = "github.com/spf13/pflag"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
-  version = "v1.0.2"
+  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
+  version = "v1.0.3"
 
 [[projects]]
   digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
@@ -313,15 +296,15 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
+  digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
   pruneopts = "NUT"
-  revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
+  revision = "8dd112bcdc25174059e45e07517d9fc663123347"
 
 [[projects]]
   branch = "master"
-  digest = "1:1400b8e87c2c9bd486ea1a13155f59f8f02d385761206df05c0b7db007a53b2c"
+  digest = "1:e3477b53a5c2fb71a7c9688e9b3d58be702807a5a88def8b9a327259d46e4979"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -332,11 +315,11 @@
     "idna",
   ]
   pruneopts = "NUT"
-  revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
+  revision = "16b79f2e4e95ea23b2bf9903c9809ff7b013ce85"
 
 [[projects]]
   branch = "master"
-  digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
+  digest = "1:17ee74a4d9b6078611784b873cdbfe91892d2c73052c430724e66fcc015b6c7b"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -346,18 +329,18 @@
     "jwt",
   ]
   pruneopts = "NUT"
-  revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
+  revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3"
 
 [[projects]]
   branch = "master"
-  digest = "1:44261e94b6095310a2df925fd68632d399a00eb153b52566a7b3697f7c70638c"
+  digest = "1:a0d91ab4d23badd4e64e115c6e6ba7dd56bd3cde5d287845822fb2599ac10236"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "1561086e645b2809fb9f8a1e2a38160bf8d53bf4"
+  revision = "30e92a19ae4a77dde818b8c3d41d51e4850cba12"
 
 [[projects]]
   digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -384,26 +367,35 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
+  digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
   name = "golang.org/x/time"
   packages = ["rate"]
   pruneopts = "NUT"
-  revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
+  revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
 
 [[projects]]
   branch = "master"
-  digest = "1:45751dc3302c90ea55913674261b2d74286b05cdd8e3ae9606e02e4e77f4353f"
+  digest = "1:e46d8e20161401a9cf8765dfa428494a3492a0b56fe114156b7da792bf41ba78"
   name = "golang.org/x/tools"
   packages = [
     "go/ast/astutil",
     "go/gcexportdata",
     "go/internal/cgo",
     "go/internal/gcimporter",
     "go/internal/packagesdriver",
     "go/packages",
     "go/types/typeutil",
     "imports",
     "internal/fastwalk",
     "internal/gopathwalk",
     "internal/module",
     "internal/semver",
   ]
   pruneopts = "NUT"
-  revision = "90fa682c2a6e6a37b3a1364ce2fe1d5e41af9d6d"
+  revision = "f8c04913dfb7b2339a756441456bdbe0af6eb508"
 
 [[projects]]
-  digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
+  digest = "1:d395d49d784dd3a11938a3e85091b6570664aa90ff2767a626565c6c130fa7e9"
   name = "google.golang.org/appengine"
   packages = [
     ".",
@@ -418,8 +410,16 @@
     "urlfetch",
   ]
   pruneopts = "NUT"
-  revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
-  version = "v1.2.0"
+  revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
+  version = "v1.4.0"
+
+[[projects]]
+  digest = "1:fe9eb931d7b59027c4a3467f7edc16cc8552dac5328039bec05045143c18e1ce"
+  name = "gopkg.in/h2non/gock.v1"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "ba88c4862a27596539531ce469478a91bc5a0511"
+  version = "v1.0.14"
 
 [[projects]]
   digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
@@ -430,12 +430,12 @@
   version = "v0.9.1"
 
 [[projects]]
-  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
+  digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
-  version = "v2.2.1"
+  revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
+  version = "v2.2.2"
 
 [[projects]]
   digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
@@ -476,10 +476,9 @@
   version = "kubernetes-1.11.0"
 
 [[projects]]
-  digest = "1:4b0d523ee389c762d02febbcfa0734c4530ebe87abe925db18f05422adcb33e8"
+  digest = "1:83b01e3d6f85c4e911de84febd69a2d3ece614c5a4a518fbc2b5d59000645980"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/equality",
     "pkg/api/errors",
     "pkg/api/meta",
     "pkg/api/resource",
@@ -659,7 +658,7 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3"
+  digest = "1:61024ed77a53ac618effed55043bf6a9afbdeb64136bd6a5b0c992d4c0363766"
   name = "k8s.io/gengo"
   packages = [
     "args",
@@ -672,15 +671,23 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991"
+  revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
+
+[[projects]]
+  digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a"
+  name = "k8s.io/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
+  version = "v0.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
+  digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/util/proto"]
   pruneopts = "NUT"
-  revision = "e3762e86a74c878ffed47484592986685639c2cd"
+  revision = "b3a7cee44a305be0a69e1b9ac03018307287e1b0"
 
 [solve-meta]
   analyzer-name = "dep"
@@ -689,13 +696,11 @@
     "github.com/google/go-cmp/cmp",
     "github.com/google/go-cmp/cmp/cmpopts",
     "github.com/istio/glog",
-    "github.com/knative/pkg/apis/istio/v1alpha3",
-    "github.com/knative/pkg/client/clientset/versioned",
-    "github.com/knative/pkg/client/clientset/versioned/fake",
-    "github.com/knative/pkg/signals",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
     "go.uber.org/zap",
     "go.uber.org/zap/zapcore",
+    "gopkg.in/h2non/gock.v1",
     "k8s.io/api/apps/v1",
     "k8s.io/api/autoscaling/v1",
     "k8s.io/api/autoscaling/v2beta1",
```
#### `Gopkg.toml`

```diff
@@ -11,6 +11,10 @@ required = [
   name = "go.uber.org/zap"
   version = "v1.9.1"
 
+[[constraint]]
+  name = "gopkg.in/h2non/gock.v1"
+  version = "v1.0.14"
+
 [[override]]
   name = "gopkg.in/yaml.v2"
   version = "v2.2.1"
@@ -45,10 +49,6 @@ required = [
   name = "github.com/google/go-cmp"
   version = "v0.2.0"
 
-[[constraint]]
-  name = "github.com/knative/pkg"
-  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
-
 [[override]]
   name = "github.com/golang/glog"
   source = "github.com/istio/glog"
```
#### `MAINTAINERS` — new file (+5)

```diff
@@ -0,0 +1,5 @@
+The maintainers are generally available in Slack at
+https://weave-community.slack.com/messages/flagger/ (obtain an invitation
+at https://slack.weave.works/).
+
+Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)
```
#### `Makefile` (±25)

```diff
@@ -3,18 +3,26 @@ VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | t
 VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
 PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
 SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
+LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
 
 run:
 	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
-		-metrics-server=https://prometheus.iowa.weavedx.com \
+		-metrics-server=https://prometheus.istio.weavedx.com \
 		-slack-url=https://hooks.slack.com/services/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 \
 		-slack-channel="devops-alerts"
 
+run-appmesh:
+	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
+		-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090 \
+		-slack-url=https://hooks.slack.com/services/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 \
+		-slack-channel="devops-alerts"
+
 build:
-	docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile
+	docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile
 
 push:
-	docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
-	docker push quay.io/stefanprodan/flagger:$(VERSION)
+	docker tag weaveworks/flagger:$(TAG) quay.io/weaveworks/flagger:$(VERSION)
+	docker push quay.io/weaveworks/flagger:$(VERSION)
 
 fmt:
 	gofmt -l -s -w $(SOURCE_DIRS)
@@ -29,9 +37,9 @@ test: test-fmt test-codegen
 	go test ./...
 
 helm-package:
-	cd charts/ && helm package flagger/ && helm package grafana/
+	cd charts/ && helm package ./*
 	mv charts/*.tgz docs/
-	helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
+	helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
 
 helm-up:
 	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
@@ -44,6 +52,7 @@ version-set:
 	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
 	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
 	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
+	sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
 	echo "Version $$next set in code, deployment and charts"
 
 version-up:
@@ -77,3 +86,7 @@ reset-test:
 	kubectl delete -f ./artifacts/namespaces
 	kubectl apply -f ./artifacts/namespaces
 	kubectl apply -f ./artifacts/canaries
+
+loadtester-push:
+	docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
+	docker push weaveworks/flagger-loadtester:$(LT_VERSION)
```
#### `README.md` (±464)

````diff
@@ -1,75 +1,56 @@
 # flagger
 
-[](https://travis-ci.org/stefanprodan/flagger)
-[](https://goreportcard.com/report/github.com/stefanprodan/flagger)
-[](https://codecov.io/gh/stefanprodan/flagger)
-[](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
-[](https://github.com/stefanprodan/flagger/releases)
+[](https://travis-ci.org/weaveworks/flagger)
+[](https://goreportcard.com/report/github.com/weaveworks/flagger)
+[](https://codecov.io/gh/weaveworks/flagger)
+[](https://github.com/weaveworks/flagger/blob/master/LICENSE)
+[](https://github.com/weaveworks/flagger/releases)
 
 Flagger is a Kubernetes operator that automates the promotion of canary deployments
-using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
-The canary analysis can be extended with webhooks for running integration tests,
+using Istio or App Mesh routing for traffic shifting and Prometheus metrics for canary analysis.
+The canary analysis can be extended with webhooks for running acceptance tests,
 load tests or any other custom validation.
 
-### Install
+Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
+indicators like HTTP requests success rate, requests average duration and pods health.
+Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
 
-Before installing Flagger make sure you have Istio set up with Prometheus enabled.
-If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
 
 
-Deploy Flagger in the `istio-system` namespace using Helm:
+## Documentation
 
-```bash
-# add the Helm repository
-helm repo add flagger https://flagger.app
+Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)
 
-# install or upgrade
-helm upgrade -i flagger flagger/flagger \
-  --namespace=istio-system \
-  --set metricsServer=http://prometheus.istio-system:9090
-```
+* Install
+  * [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
+  * [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
+  * [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
+* How it works
+  * [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
+  * [Routing](https://docs.flagger.app/how-it-works#istio-routing)
+  * [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
+  * [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
+  * [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
+  * [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
+  * [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
+  * [Load testing](https://docs.flagger.app/how-it-works#load-testing)
+* Usage
+  * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
+  * [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
+  * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
+  * [Monitoring](https://docs.flagger.app/usage/monitoring)
+  * [Alerting](https://docs.flagger.app/usage/alerting)
+* Tutorials
+  * [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)
 
-Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
+## Canary CRD
 
-### Usage
+Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
+then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services).
+These objects expose the application on the mesh and drive the canary analysis and promotion.
 
-Flagger takes a Kubernetes deployment and creates a series of objects
-(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
-ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
-Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
-to drive the canary analysis and promotion.
 
 
-Gated canary promotion stages:
-
-* scan for canary deployments
-* check Istio virtual service routes are mapped to primary and canary ClusterIP services
-* check primary and canary deployments status
-  * halt advancement if a rolling update is underway
-  * halt advancement if pods are unhealthy
-* increase canary traffic weight percentage from 0% to 5% (step weight)
-* check canary HTTP request success rate and latency
-  * halt advancement if any metric is under the specified threshold
-  * increment the failed checks counter
-* check if the number of failed checks reached the threshold
-  * route all traffic to primary
-  * scale to zero the canary deployment and mark it as failed
-  * wait for the canary deployment to be updated (revision bump) and start over
-* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
-* halt advancement while canary request success rate is under the threshold
-* halt advancement while canary request duration P99 is over the threshold
-* halt advancement if the primary or canary deployment becomes unhealthy
-* halt advancement while canary deployment is being scaled up/down by HPA
-* promote canary to primary
-  * copy canary deployment spec template over primary
-* wait for primary rolling update to finish
-  * halt advancement if pods are unhealthy
-* route all traffic to primary
-* scale to zero the canary deployment
-* mark rollout as finished
-* wait for the canary deployment to be updated (revision bump) and start over
-
-You can change the canary analysis _max weight_ and the _step weight_ percentage in the Flagger's custom resource.
+Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
+When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.
 
 For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -99,9 +80,31 @@ spec:
     # Istio gateways (optional)
     gateways:
     - public-gateway.istio-system.svc.cluster.local
    - mesh
     # Istio virtual service host names (optional)
     hosts:
     - podinfo.example.com
+    # HTTP match conditions (optional)
+    match:
+      - uri:
+          prefix: /
+    # HTTP rewrite (optional)
+    rewrite:
+      uri: /
+    # Envoy timeout and retry policy (optional)
+    headers:
+      request:
+        add:
+          x-envoy-upstream-rq-timeout-ms: "15000"
+          x-envoy-max-retries: "10"
+          x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
+    # cross-origin resource sharing policy (optional)
+    corsPolicy:
+      allowOrigin:
+      - example.com
+  # promote the canary without analysing it (default false)
+  skipAnalysis: false
   # define the canary analysis timing and KPIs
   canaryAnalysis:
     # schedule interval (default 60s)
     interval: 1m
@@ -115,6 +118,7 @@ spec:
     stepWeight: 5
     # Istio Prometheus checks
     metrics:
+    # builtin Istio checks
     - name: istio_requests_total
       # minimum req success rate (non 5xx responses)
      # percentage (0-100)
@@ -125,320 +129,66 @@ spec:
       # milliseconds
       threshold: 500
       interval: 30s
+    # custom check
+    - name: "kafka lag"
+      threshold: 100
+      query: |
+        avg_over_time(
+          kafka_consumergroup_lag{
+            consumergroup=~"podinfo-consumer-.*",
+            topic="podinfo"
+          }[1m]
+        )
+    # external checks (optional)
+    webhooks:
+      - name: integration-tests
+        url: http://podinfo.test:9898/echo
+        timeout: 1m
+      - name: load-test
+        url: http://flagger-loadtester.test/
+        timeout: 5s
+        metadata:
+          test: "all"
+          token: "16688eb5e9f289f1991c"
+          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
 ```
 
-The canary analysis is using the following promql queries:
+For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/how-it-works).
 
-_HTTP requests success rate percentage_
-
-```sql
-sum(
-    rate(
-        istio_requests_total{
-          reporter="destination",
-          destination_workload_namespace=~"$namespace",
-          destination_workload=~"$workload",
-          response_code!~"5.*"
-        }[$interval]
-    )
-)
-/
-sum(
-    rate(
-        istio_requests_total{
-          reporter="destination",
-          destination_workload_namespace=~"$namespace",
-          destination_workload=~"$workload"
-        }[$interval]
-    )
-)
-```
+## Features
 
-_HTTP requests milliseconds duration P99_
+| Feature | Istio | App Mesh |
+| -------------------------------------------- | ------------------ | ------------------ |
+| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: |
+| A/B testing (headers and cookies filters) | :heavy_check_mark: | :heavy_minus_sign: |
+| Load testing | :heavy_check_mark: | :heavy_check_mark: |
+| Webhooks (custom acceptance tests) | :heavy_check_mark: | :heavy_check_mark: |
+| Request success rate check (Envoy metric) | :heavy_check_mark: | :heavy_check_mark: |
+| Request duration check (Envoy metric) | :heavy_check_mark: | :heavy_minus_sign: |
+| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: |
+| Ingress gateway (CORS, retries and timeouts) | :heavy_check_mark: | :heavy_minus_sign: |
 
-```sql
-histogram_quantile(0.99,
-  sum(
-    irate(
-      istio_request_duration_seconds_bucket{
-        reporter="destination",
-        destination_workload=~"$workload",
-        destination_workload_namespace=~"$namespace"
-      }[$interval]
-    )
-  ) by (le)
-)
-```
+## Roadmap
 
-The canary analysis can be extended with webhooks.
-Flagger will call the webhooks (HTTP POST) and determine from the response status code (HTTP 2xx) if the canary is failing or not.
-
-Webhook payload:
-
-```json
-{
-    "name": "podinfo",
-    "namespace": "test",
-    "metadata": {
-        "test": "all",
-        "token": "16688eb5e9f289f1991c"
-    }
-}
-```
````
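An aside on the webhook contract removed above: Flagger POSTs the JSON payload and reads only the response status code, so any HTTP handler that answers 2xx to approve and non-2xx to reject works as a gate. A minimal sketch in Go — the handler path, port, and the token check are illustrative assumptions, not from this diff:

```go
// minimal sketch: a webhook receiver compatible with Flagger's HTTP POST.
// A 2xx response approves the check; any other status halts the advancement.
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// payload mirrors the JSON body shown above.
type payload struct {
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	Metadata  map[string]string `json:"metadata"`
}

func main() {
	http.HandleFunc("/gate", func(w http.ResponseWriter, r *http.Request) {
		var p payload
		if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// hypothetical acceptance test: only canaries in the test
		// namespace carrying a token are allowed to proceed
		if p.Namespace == "test" && p.Metadata["token"] != "" {
			w.WriteHeader(http.StatusOK) // 2xx -> check passed
			return
		}
		w.WriteHeader(http.StatusForbidden) // non-2xx -> halt advancement
	})
	log.Fatal(http.ListenAndServe(":9898", nil))
}
```

Pointing a canary's `webhooks[].url` at such a service is all the wiring needed; the check passes or fails purely on the status code.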
|
||||
|
||||
### Automated canary analysis, promotions and rollbacks
|
||||
|
||||
Create a test namespace with Istio sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
|
||||
```
|
||||
|
||||
Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
virtualservice.networking.istio.io/podinfo
|
||||
```
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.2.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new canary analysis:
|
||||
|
||||
```
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Last Transition Time: 2019-01-16T13:47:16Z
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 25
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 35
|
||||
Normal Synced 55s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 45s flagger Advance podinfo.test canary weight 45
|
||||
Normal Synced 35s flagger Advance podinfo.test canary weight 50
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 5 2019-01-16T14:05:07Z
|
||||
```
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Create a tester pod and exec into it:
|
||||
|
||||
```bash
|
||||
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
|
||||
kubectl -n test exec -it tester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:         0
  Failed Checks:         10
  Last Transition Time:  2019-01-16T13:47:16Z
  Phase:                 Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

### Monitoring

Flagger comes with a Grafana dashboard made for canary analysis.

Install Grafana with Helm:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```

The dashboard shows the RED and USE metrics for the primary and canary workloads:

![canary dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)

The canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:

```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```

Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:

```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1

# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo",namespace="test"} 1

# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary",namespace="test"} 95
flagger_canary_weight{workload="podinfo",namespace="test"} 5

# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
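
You can also scrape these gauges straight from Flagger, without going through Prometheus. A sketch, assuming the conventional /metrics path on Flagger's HTTP port (8080, per the deployment manifest in this changeset):

```bash
kubectl -n istio-system port-forward deploy/flagger 8080:8080 &

# Dump only the canary gauges listed above
curl -s http://localhost:8080/metrics | grep '^flagger_canary'
```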

### Alerting

Flagger can be configured to send Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```

Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected, and whether the canary analysis failed or succeeded.

![canary succeeded](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)

A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the maximum number of failed checks:

![canary failed](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)

Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:

```yaml
- alert: canary_rollback
  expr: flagger_canary_status > 1
  for: 1m
  labels:
    severity: warning
  annotations:
    summary: "Canary failed"
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
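
Before loading the rule into Prometheus you can sanity-check it with promtool; the rule has to be wrapped in a rule group first, and the file name below is just an example:

```bash
cat > canary-alerts.yaml <<'EOF'
groups:
  - name: flagger
    rules:
      - alert: canary_rollback
        expr: flagger_canary_status > 1
        for: 1m
        labels:
          severity: warning
        annotations:
          summary: "Canary failed"
          description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
EOF

promtool check rules canary-alerts.yaml
```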

### Roadmap

* Extend the validation mechanism to support metrics other than HTTP success rate and latency
* Integrate with other service mesh technologies like Linkerd v2, Super Gloo or Consul Mesh
* Add support for comparing the canary metrics to the primary ones and validating based on the deviation between the two
* Extend the canary analysis and promotion to workload types other than Kubernetes deployments, such as Flux Helm releases or OpenFaaS functions

## Contributing

Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.

When submitting bug reports please include as many details as possible:

* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workload definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)

## Getting Help

If you have any questions about Flagger and progressive delivery:

* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [Weave community slack](https://slack.weave.works/) and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks, hands-on training and meetups in your area.
* File an [issue](https://github.com/weaveworks/flagger/issues/new).

Your feedback is always welcome!
62
artifacts/ab-testing/canary.yaml
Normal file
@@ -0,0 +1,62 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: abtest
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: abtest
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: abtest
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- abtest.istio.weavedx.com
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# canary match condition
|
||||
match:
|
||||
- headers:
|
||||
user-agent:
|
||||
regex: '^(?!.*Chrome)(?=.*\bSafari\b).*$'
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(type=insider)(;.*)?$"
|
||||
metrics:
|
||||
- name: istio_requests_total
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: istio_request_duration_seconds_bucket
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"
|
||||
67
artifacts/ab-testing/deployment.yaml
Normal file
@@ -0,0 +1,67 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: abtest
|
||||
namespace: test
|
||||
labels:
|
||||
app: abtest
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: abtest
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: abtest
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
- --random-delay=false
|
||||
- --random-error=false
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
value: blue
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
19
artifacts/ab-testing/hpa.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: abtest
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: abtest
|
||||
minReplicas: 2
|
||||
maxReplicas: 4
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
|
||||
50
artifacts/appmesh/canary.yaml
Normal file
@@ -0,0 +1,50 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# App Mesh reference
|
||||
meshName: global
|
||||
# define the canary analysis timing and KPIs
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# App Mesh Prometheus checks
|
||||
metrics:
|
||||
- name: envoy_cluster_upstream_rq
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
65
artifacts/appmesh/deployment.yaml
Normal file
@@ -0,0 +1,65 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
value: blue
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
6
artifacts/appmesh/global-mesh.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: appmesh.k8s.aws/v1beta1
|
||||
kind: Mesh
|
||||
metadata:
|
||||
name: global
|
||||
spec:
|
||||
serviceDiscoveryType: dns
|
||||
19
artifacts/appmesh/hpa.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 2
|
||||
maxReplicas: 4
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
|
||||
177
artifacts/appmesh/ingress.yaml
Normal file
@@ -0,0 +1,177 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: ingress-config
|
||||
namespace: test
|
||||
labels:
|
||||
app: ingress
|
||||
data:
|
||||
envoy.yaml: |
|
||||
static_resources:
|
||||
listeners:
|
||||
- address:
|
||||
socket_address:
|
||||
address: 0.0.0.0
|
||||
port_value: 80
|
||||
filter_chains:
|
||||
- filters:
|
||||
- name: envoy.http_connection_manager
|
||||
config:
|
||||
access_log:
|
||||
- name: envoy.file_access_log
|
||||
config:
|
||||
path: /dev/stdout
|
||||
codec_type: auto
|
||||
stat_prefix: ingress_http
|
||||
http_filters:
|
||||
- name: envoy.router
|
||||
config: {}
|
||||
route_config:
|
||||
name: local_route
|
||||
virtual_hosts:
|
||||
- name: local_service
|
||||
domains: ["*"]
|
||||
routes:
|
||||
- match:
|
||||
prefix: "/"
|
||||
route:
|
||||
cluster: podinfo
|
||||
host_rewrite: podinfo.test
|
||||
timeout: 15s
|
||||
retry_policy:
|
||||
retry_on: "gateway-error,connect-failure,refused-stream"
|
||||
num_retries: 10
|
||||
per_try_timeout: 5s
|
||||
clusters:
|
||||
- name: podinfo
|
||||
connect_timeout: 0.30s
|
||||
type: strict_dns
|
||||
lb_policy: round_robin
|
||||
http2_protocol_options: {}
|
||||
hosts:
|
||||
- socket_address:
|
||||
address: podinfo.test
|
||||
port_value: 9898
|
||||
admin:
|
||||
access_log_path: /dev/null
|
||||
address:
|
||||
socket_address:
|
||||
address: 0.0.0.0
|
||||
port_value: 9999
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
labels:
|
||||
app: ingress
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ingress
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ingress
|
||||
annotations:
|
||||
prometheus.io/path: "/stats/prometheus"
|
||||
prometheus.io/port: "9999"
|
||||
prometheus.io/scrape: "true"
|
||||
# dummy port to exclude ingress from mesh traffic
|
||||
# only egress should go over the mesh
|
||||
appmesh.k8s.aws/ports: "444"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: ingress
|
||||
image: "envoyproxy/envoy-alpine:d920944aed67425f91fc203774aebce9609e5d9a"
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
command:
|
||||
- /usr/bin/dumb-init
|
||||
- --
|
||||
args:
|
||||
- /usr/local/bin/envoy
|
||||
- --base-id 30
|
||||
- --v2-config-only
|
||||
- -l
|
||||
- $loglevel
|
||||
- -c
|
||||
- /config/envoy.yaml
|
||||
ports:
|
||||
- name: admin
|
||||
containerPort: 9999
|
||||
protocol: TCP
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
- name: https
|
||||
containerPort: 443
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 5
|
||||
tcpSocket:
|
||||
port: admin
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 5
|
||||
tcpSocket:
|
||||
port: admin
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /config
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: ingress-config
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
spec:
|
||||
selector:
|
||||
app: ingress
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: 80
|
||||
- protocol: TCP
|
||||
name: https
|
||||
port: 443
|
||||
targetPort: 443
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: appmesh.k8s.aws/v1beta1
|
||||
kind: VirtualNode
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
spec:
|
||||
meshName: global
|
||||
listeners:
|
||||
- portMapping:
|
||||
port: 80
|
||||
protocol: http
|
||||
serviceDiscovery:
|
||||
dns:
|
||||
hostName: ingress.test
|
||||
backends:
|
||||
- virtualService:
|
||||
virtualServiceName: podinfo.test
|
||||
@@ -23,9 +23,26 @@ spec:
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.iowa.weavedx.com
|
||||
- app.istio.weavedx.com
|
||||
# HTTP match conditions (optional)
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
# HTTP rewrite (optional)
|
||||
rewrite:
|
||||
uri: /
|
||||
# Envoy timeout and retry policy (optional)
|
||||
headers:
|
||||
request:
|
||||
add:
|
||||
x-envoy-upstream-rq-timeout-ms: "15000"
|
||||
x-envoy-max-retries: "10"
|
||||
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
|
||||
# promote the canary without analysing it (default false)
|
||||
skipAnalysis: false
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
@@ -51,9 +68,10 @@ spec:
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: integration-tests
|
||||
url: https://httpbin.org/post
|
||||
timeout: 1m
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
test: "all"
|
||||
token: "16688eb5e9f289f1991c"
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
logCmdOutput: "true"
|
||||
|
||||
@@ -25,7 +25,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.3.0
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
|
||||
6
artifacts/cluster/namespaces/test.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
istio-injection: enabled
|
||||
26
artifacts/cluster/releases/test/backend.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: backend
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: regexp:^1.4.*
|
||||
spec:
|
||||
releaseName: backend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.0.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.4.0
|
||||
httpServer:
|
||||
timeout: 30s
|
||||
canary:
|
||||
enabled: true
|
||||
istioIngress:
|
||||
enabled: false
|
||||
loadtest:
|
||||
enabled: true
|
||||
27
artifacts/cluster/releases/test/frontend.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: frontend
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: semver:~1.4
|
||||
spec:
|
||||
releaseName: frontend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.0.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.4.0
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
canary:
|
||||
enabled: true
|
||||
istioIngress:
|
||||
enabled: true
|
||||
gateway: public-gateway.istio-system.svc.cluster.local
|
||||
host: frontend.istio.example.com
|
||||
loadtest:
|
||||
enabled: true
|
||||
18
artifacts/cluster/releases/test/loadtester.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: loadtester
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: glob:0.*
|
||||
spec:
|
||||
releaseName: flagger-loadtester
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: loadtester
|
||||
version: 0.1.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/flagger-loadtester
|
||||
tag: 0.1.0
|
||||
59
artifacts/configs/canary.yaml
Normal file
@@ -0,0 +1,59 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.iowa.weavedx.com
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Istio Prometheus checks
|
||||
metrics:
|
||||
- name: istio_requests_total
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: istio_request_duration_seconds_bucket
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
16
artifacts/configs/configs.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: podinfo-config-env
|
||||
namespace: test
|
||||
data:
|
||||
color: blue
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: podinfo-config-vol
|
||||
namespace: test
|
||||
data:
|
||||
output: console
|
||||
textmode: "true"
|
||||
89
artifacts/configs/deployment.yaml
Normal file
@@ -0,0 +1,89 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
- --random-delay=false
|
||||
- --random-error=false
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: podinfo-config-env
|
||||
key: color
|
||||
- name: SECRET_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: podinfo-secret-env
|
||||
key: user
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: configs
|
||||
mountPath: /etc/podinfo/configs
|
||||
readOnly: true
|
||||
- name: secrets
|
||||
mountPath: /etc/podinfo/secrets
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: configs
|
||||
configMap:
|
||||
name: podinfo-config-vol
|
||||
- name: secrets
|
||||
secret:
|
||||
secretName: podinfo-secret-vol
|
||||
19
artifacts/configs/hpa.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 1
|
||||
maxReplicas: 4
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
|
||||
16
artifacts/configs/secrets.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: podinfo-secret-env
|
||||
namespace: test
|
||||
data:
|
||||
password: cGFzc3dvcmQ=
|
||||
user: YWRtaW4=
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: podinfo-secret-vol
|
||||
namespace: test
|
||||
data:
|
||||
key: cGFzc3dvcmQ=
|
||||
264
artifacts/eks/appmesh-prometheus.yaml
Normal file
@@ -0,0 +1,264 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
scrape_configs:
|
||||
|
||||
# Scrape config for AppMesh Envoy sidecar
|
||||
- job_name: 'appmesh-envoy'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_name]
|
||||
action: keep
|
||||
regex: '^envoy$'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: ${1}:9901
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
|
||||
# Exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
# Scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# Scrape config for nodes
|
||||
- job_name: 'kubernetes-nodes'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
|
||||
# scrape config for cAdvisor
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
|
||||
# scrape config for pods
|
||||
- job_name: kubernetes-pods
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- action: keep
|
||||
regex: true
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
|
||||
- source_labels: [ __address__ ]
|
||||
regex: '.*9901.*'
|
||||
action: drop
|
||||
- action: replace
|
||||
regex: (.+)
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_path
|
||||
target_label: __metrics_path__
|
||||
- action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
source_labels:
|
||||
- __address__
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_port
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: kubernetes_namespace
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: kubernetes_pod_name
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus
|
||||
annotations:
|
||||
version: "appmesh-v1alpha1"
|
||||
spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.7.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=6h'
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: http
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: 9090
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: 9090
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 128Mi
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/prometheus
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: prometheus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
name: prometheus
|
||||
spec:
|
||||
selector:
|
||||
app: prometheus
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
@@ -13,11 +13,50 @@ metadata:
|
||||
labels:
|
||||
app: flagger
|
||||
rules:
|
||||
- apiGroups: ['*']
|
||||
resources: ['*']
|
||||
verbs: ['*']
|
||||
- nonResourceURLs: ['*']
|
||||
verbs: ['*']
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
- configmaps
|
||||
- secrets
|
||||
- services
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- flagger.app
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- networking.istio.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- appmesh.k8s.aws
|
||||
resources:
|
||||
- meshes
|
||||
- meshes/status
|
||||
- virtualnodes
|
||||
- virtualnodes/status
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: canaries.flagger.app
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
spec:
|
||||
group: flagger.app
|
||||
version: v1alpha3
|
||||
@@ -39,9 +41,9 @@ spec:
|
||||
properties:
|
||||
spec:
|
||||
required:
|
||||
- targetRef
|
||||
- service
|
||||
- canaryAnalysis
|
||||
- targetRef
|
||||
- service
|
||||
- canaryAnalysis
|
||||
properties:
|
||||
progressDeadlineSeconds:
|
||||
type: number
|
||||
@@ -73,11 +75,21 @@ spec:
|
||||
properties:
|
||||
port:
|
||||
type: number
|
||||
portName:
|
||||
type: string
|
||||
meshName:
|
||||
type: string
|
||||
timeout:
|
||||
type: string
|
||||
skipAnalysis:
|
||||
type: boolean
|
||||
canaryAnalysis:
|
||||
properties:
|
||||
interval:
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
iterations:
|
||||
type: number
|
||||
threshold:
|
||||
type: number
|
||||
maxWeight:
|
||||
@@ -89,7 +101,7 @@ spec:
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ['name', 'interval', 'threshold']
|
||||
required: ['name', 'threshold']
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
@@ -98,6 +110,8 @@ spec:
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
threshold:
|
||||
type: number
|
||||
query:
|
||||
type: string
|
||||
webhooks:
|
||||
type: array
|
||||
properties:
|
||||
@@ -107,9 +121,36 @@ spec:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- pre-rollout
|
||||
- rollout
|
||||
- post-rollout
|
||||
url:
|
||||
type: string
|
||||
format: url
|
||||
timeout:
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
status:
|
||||
properties:
|
||||
phase:
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- Initialized
|
||||
- Progressing
|
||||
- Succeeded
|
||||
- Failed
|
||||
canaryWeight:
|
||||
type: number
|
||||
failedChecks:
|
||||
type: number
|
||||
iterations:
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
type: string
|
||||
|
||||
@@ -22,8 +22,8 @@ spec:
|
||||
serviceAccountName: flagger
|
||||
containers:
|
||||
- name: flagger
|
||||
image: quay.io/stefanprodan/flagger:0.4.0
|
||||
imagePullPolicy: Always
|
||||
image: weaveworks/flagger:0.11.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
|
||||
27
artifacts/gke/istio-gateway.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: public-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
tls:
|
||||
httpsRedirect: true
|
||||
- port:
|
||||
number: 443
|
||||
name: https
|
||||
protocol: HTTPS
|
||||
hosts:
|
||||
- "*"
|
||||
tls:
|
||||
mode: SIMPLE
|
||||
privateKey: /etc/istio/ingressgateway-certs/tls.key
|
||||
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
|
||||
443
artifacts/gke/istio-prometheus.yaml
Normal file
@@ -0,0 +1,443 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
scrape_configs:
|
||||
|
||||
- job_name: 'istio-mesh'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;prometheus
|
||||
|
||||
|
||||
# Scrape config for envoy stats
|
||||
- job_name: 'envoy-stats'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_port_name]
|
||||
action: keep
|
||||
regex: '.*-envoy-prom'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:15090
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
metric_relabel_configs:
|
||||
# Exclude some of the envoy metrics that have massive cardinality
|
||||
# This list may need to be pruned further moving forward, as informed
|
||||
# by performance and scalability testing.
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
|
||||
- job_name: 'istio-policy'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-policy;http-monitoring
|
||||
|
||||
- job_name: 'istio-telemetry'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;http-monitoring
|
||||
|
||||
- job_name: 'pilot'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-pilot;http-monitoring
|
||||
|
||||
- job_name: 'galley'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-galley;http-monitoring
|
||||
|
||||
# scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# scrape config for nodes (kubelet)
|
||||
- job_name: 'kubernetes-nodes'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
|
||||
# Scrape config for Kubelet cAdvisor.
|
||||
#
|
||||
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
|
||||
# (those whose names begin with 'container_') have been removed from the
|
||||
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
|
||||
# retrieve those metrics.
|
||||
#
|
||||
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
|
||||
# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
|
||||
# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
|
||||
# the --cadvisor-port=0 Kubelet flag).
|
||||
#
|
||||
# This job is not necessary and should be removed in Kubernetes 1.6 and
|
||||
# earlier versions, or it will cause the metrics to be scraped twice.
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
|
||||
# scrape config for service endpoints.
|
||||
- job_name: 'kubernetes-service-endpoints'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
|
||||
action: replace
|
||||
target_label: __scheme__
|
||||
regex: (https?)
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
target_label: __address__
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
action: replace
|
||||
target_label: kubernetes_name
|
||||
|
||||
- job_name: 'kubernetes-pods'
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs: # If first two labels are present, pod should be scraped by the istio-secure job.
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status]
|
||||
action: drop
|
||||
regex: (.+)
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_istio_mtls]
|
||||
action: drop
|
||||
regex: (true)
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
- job_name: 'kubernetes-pods-istio-secure'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /etc/istio-certs/root-cert.pem
|
||||
cert_file: /etc/istio-certs/cert-chain.pem
|
||||
key_file: /etc/istio-certs/key.pem
|
||||
insecure_skip_verify: true # prometheus does not support secure naming.
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
# sidecar status annotation is added by sidecar injector and
|
||||
# istio_workload_mtls_ability can be specifically placed on a pod to indicate its ability to receive mtls traffic.
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status, __meta_kubernetes_pod_annotation_istio_mtls]
|
||||
action: keep
|
||||
regex: (([^;]+);([^;]*))|(([^;]*);(true))
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__] # Only keep address that is host:port
|
||||
action: keep # otherwise an extra target with ':443' is added for https scheme
|
||||
regex: ([^:]+):(\d+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/service.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
labels:
|
||||
name: prometheus
|
||||
spec:
|
||||
selector:
|
||||
app: prometheus
|
||||
ports:
|
||||
- name: http-prometheus
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus
|
||||
annotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.7.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=6h'
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: http
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: 9090
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: 9090
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/prometheus
|
||||
- mountPath: /etc/istio-certs
|
||||
name: istio-certs
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: prometheus
|
||||
- name: istio-certs
|
||||
secret:
|
||||
defaultMode: 420
|
||||
optional: true
|
||||
secretName: istio.default
|
||||
59
artifacts/loadtester/deployment.yaml
Normal file
@@ -0,0 +1,59 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: flagger-loadtester
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: loadtester
|
||||
image: weaveworks/flagger-loadtester:0.2.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./loadtester
|
||||
- -port=8080
|
||||
- -log-level=info
|
||||
- -timeout=1h
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 10001
|
||||
15
artifacts/loadtester/service.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: flagger-loadtester
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -4,3 +4,4 @@ metadata:
|
||||
name: test
|
||||
labels:
|
||||
istio-injection: enabled
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: enabled
|
||||
|
||||
45
artifacts/routing/destination-rule.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- podinfo.istio.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: podinfo
|
||||
subset: primary
|
||||
weight: 50
|
||||
- destination:
|
||||
host: podinfo
|
||||
subset: canary
|
||||
weight: 50
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: podinfo-destination
|
||||
namespace: test
|
||||
spec:
|
||||
host: podinfo
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
consistentHash:
|
||||
httpCookie:
|
||||
name: istiouser
|
||||
ttl: 30s
|
||||
subsets:
|
||||
- name: primary
|
||||
labels:
|
||||
app: podinfo
|
||||
role: primary
|
||||
- name: canary
|
||||
labels:
|
||||
app: podinfo
|
||||
role: canary
|
||||

@@ -8,13 +8,17 @@ spec:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - podinfo.iowa.weavedx.com
  - app.istio.weavedx.com
  - podinfo
  http:
  - match:
    - headers:
        user-agent:
          regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
      uri:
        prefix: "/version/"
    rewrite:
      uri: /api/info
    route:
    - destination:
        host: podinfo-primary

@@ -26,7 +30,12 @@ spec:
        port:
          number: 9898
      weight: 100
  - route:
  - match:
    - uri:
        prefix: "/version/"
    rewrite:
      uri: /api/info
    route:
    - destination:
        host: podinfo-primary
        port:

@@ -23,7 +23,7 @@ spec:
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.2.0
        image: quay.io/stefanprodan/podinfo:1.4.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898

@@ -67,9 +67,3 @@ spec:
          requests:
            cpu: 100m
            memory: 16Mi
        volumeMounts:
        - mountPath: /data
          name: data
      volumes:
      - emptyDir: {}
        name: data

@@ -1,10 +1,8 @@
apiVersion: v1
kind: Service
metadata:
  name: podinfo
  name: podinfo-canary
  namespace: test
  labels:
    app: podinfo
spec:
  type: ClusterIP
  selector:

@@ -23,7 +23,7 @@ spec:
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.1.1
        image: quay.io/stefanprodan/podinfo:1.4.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898

14  artifacts/workloads/service.yaml  Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: podinfo
  namespace: test
spec:
  type: ClusterIP
  selector:
    app: podinfo-primary
  ports:
    - name: http
      port: 9898
      protocol: TCP
      targetPort: http

@@ -1,14 +1,14 @@
apiVersion: v1
name: flagger
version: 0.4.0
appVersion: 0.4.0
version: 0.11.0
appVersion: 0.11.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
  - https://github.com/stefanprodan/flagger
  - https://github.com/weaveworks/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan

@@ -16,4 +16,5 @@ maintainers:
keywords:
  - canary
  - istio
  - appmesh
  - gitops

@@ -1,6 +1,6 @@
# Flagger

[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP request success rate, average request duration and pod health.

@@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: canaries.flagger.app
  annotations:
    helm.sh/resource-policy: keep
spec:
  group: flagger.app
  version: v1alpha3
@@ -74,11 +76,21 @@ spec:
          properties:
            port:
              type: number
            portName:
              type: string
            meshName:
              type: string
            timeout:
              type: string
        skipAnalysis:
          type: boolean
        canaryAnalysis:
          properties:
            interval:
              type: string
              pattern: "^[0-9]+(m|s)"
            iterations:
              type: number
            threshold:
              type: number
            maxWeight:

@@ -90,7 +102,7 @@ spec:
          properties:
            items:
              type: object
              required: ['name', 'interval', 'threshold']
              required: ['name', 'threshold']
              properties:
                name:
                  type: string

@@ -99,6 +111,8 @@ spec:
                  pattern: "^[0-9]+(m|s)"
                threshold:
                  type: number
                query:
                  type: string
            webhooks:
              type: array
              properties:

@@ -108,10 +122,37 @@ spec:
                properties:
                  name:
                    type: string
                  type:
                    type: string
                    enum:
                      - ""
                      - pre-rollout
                      - rollout
                      - post-rollout
                  url:
                    type: string
                    format: url
                  timeout:
                    type: string
                    pattern: "^[0-9]+(m|s)"
      status:
        properties:
          phase:
            type: string
            enum:
              - ""
              - Initialized
              - Progressing
              - Succeeded
              - Failed
          canaryWeight:
            type: number
          failedChecks:
            type: number
          iterations:
            type: number
          lastAppliedSpec:
            type: string
          lastTransitionTime:
            type: string
{{- end }}
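
The new `type` enum above lets a webhook run at a specific stage of the analysis. A minimal sketch of a Canary wiring a pre-rollout gate and a rollout load test to the tester service added elsewhere in this diff; the resource names and URL are illustrative assumptions, not taken from the changeset:

```yaml
# Hypothetical example; names and URLs are assumptions.
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  canaryAnalysis:
    webhooks:
      - name: smoke-test        # pre-rollout: gates the analysis before traffic shifting starts
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
      - name: load-test         # rollout: runs during each analysis iteration
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
```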

@@ -35,7 +35,13 @@ spec:
          command:
            - ./flagger
            - -log-level=info
            {{- if .Values.meshProvider }}
            - -mesh-provider={{ .Values.meshProvider }}
            {{- end }}
            - -metrics-server={{ .Values.metricsServer }}
            {{- if .Values.namespace }}
            - -namespace={{ .Values.namespace }}
            {{- end }}
            {{- if .Values.slack.url }}
            - -slack-url={{ .Values.slack.url }}
            - -slack-user={{ .Values.slack.user }}

@@ -9,11 +9,50 @@ metadata:
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
  - apiGroups: ['*']
    resources: ['*']
    verbs: ['*']
  - nonResourceURLs: ['*']
    verbs: ['*']
  - apiGroups:
      - ""
    resources:
      - events
      - configmaps
      - secrets
      - services
    verbs: ["*"]
  - apiGroups:
      - apps
    resources:
      - deployments
    verbs: ["*"]
  - apiGroups:
      - autoscaling
    resources:
      - horizontalpodautoscalers
    verbs: ["*"]
  - apiGroups:
      - flagger.app
    resources:
      - canaries
      - canaries/status
    verbs: ["*"]
  - apiGroups:
      - networking.istio.io
    resources:
      - virtualservices
      - virtualservices/status
    verbs: ["*"]
  - apiGroups:
      - appmesh.k8s.aws
    resources:
      - meshes
      - meshes/status
      - virtualnodes
      - virtualnodes/status
      - virtualservices
      - virtualservices/status
    verbs: ["*"]
  - nonResourceURLs:
      - /version
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

@@ -1,11 +1,17 @@
# Default values for flagger.

image:
  repository: quay.io/stefanprodan/flagger
  tag: 0.4.0
  repository: weaveworks/flagger
  tag: 0.11.0
  pullPolicy: IfNotPresent

metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
metricsServer: "http://prometheus:9090"

# accepted values are istio or appmesh (defaults to istio)
meshProvider: ""

# single namespace restriction
namespace: ""

slack:
  user: flagger
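
A hedged sketch of overriding these new defaults for an App Mesh install; the Prometheus address and namespace are illustrative assumptions:

```yaml
# example-values.yaml -- illustrative overrides, not part of this diff
meshProvider: appmesh
metricsServer: "http://prometheus.appmesh-system:9090"  # assumed Prometheus location
namespace: "test"   # restrict Flagger to a single namespace
```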

@@ -1,12 +1,12 @@
apiVersion: v1
name: grafana
version: 0.1.0
appVersion: 5.4.2
version: 1.1.0
appVersion: 5.4.3
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
  - https://github.com/stefanprodan/flagger
  - https://github.com/weaveworks/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan

@@ -2,11 +2,11 @@

Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.

[dashboard screenshot]

## Prerequisites

* Kubernetes >= 1.9
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

@@ -75,5 +75,5 @@ helm install flagger/grafana --name flagger-grafana -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
```

1248  charts/grafana/dashboards/appmesh.json  Normal file

@@ -2,7 +2,6 @@
"annotations": {
"list": [
{
"$$hashKey": "object:1587",
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
@@ -16,8 +15,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"iteration": 1534587617141,
"id": 1,
"iteration": 1549736611069,
"links": [],
"panels": [
{
@@ -179,7 +178,6 @@
"tableColumn": "",
"targets": [
{
"$$hashKey": "object:2857",
"expr": "sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$primary\",response_code!~\"5.*\"}[30s])) / sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$primary\"}[30s]))",
"format": "time_series",
"intervalFactor": 1,
@@ -344,7 +342,6 @@
"tableColumn": "",
"targets": [
{
"$$hashKey": "object:2810",
"expr": "sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$canary\",response_code!~\"5.*\"}[30s])) / sum(irate(istio_requests_total{reporter=\"destination\",destination_workload_namespace=~\"$namespace\",destination_workload=~\"$canary\"}[30s]))",
"format": "time_series",
"intervalFactor": 1,
@@ -363,7 +360,7 @@
"value": "null"
}
],
"valueName": "avg"
"valueName": "current"
},
{
"aliasColors": {},
@@ -432,6 +429,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Request Duration",
"tooltip": {
@@ -464,7 +462,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -533,6 +535,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Request Duration",
"tooltip": {
@@ -565,7 +568,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>USE: $canary.$namespace</span>\n</div>",
@@ -623,7 +630,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -634,6 +640,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: CPU Usage by Pod",
"tooltip": {
@@ -651,7 +658,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"format": "s",
"label": "CPU seconds / second",
"logBase": 1,
@@ -660,7 +666,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -668,7 +673,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -711,7 +720,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -722,6 +730,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: CPU Usage by Pod",
"tooltip": {
@@ -739,7 +748,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"format": "s",
"label": "CPU seconds / second",
"logBase": 1,
@@ -748,7 +756,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -756,7 +763,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -799,7 +810,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -811,6 +821,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Memory Usage by Pod",
"tooltip": {
@@ -828,7 +839,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "bytes",
"label": "",
@@ -838,7 +848,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -846,7 +855,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -889,7 +902,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1685",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"format": "time_series",
"hide": false,
@@ -901,6 +913,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Memory Usage by Pod",
"tooltip": {
@@ -918,7 +931,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "bytes",
"label": "",
@@ -928,7 +940,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -936,7 +947,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -975,12 +990,10 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:3641",
"alias": "received",
"color": "#f9d9f9"
},
{
"$$hashKey": "object:3649",
"alias": "transmited",
"color": "#f29191"
}
@@ -990,7 +1003,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:2598",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
@@ -998,7 +1010,6 @@
"refId": "A"
},
{
"$$hashKey": "object:3245",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
@@ -1008,6 +1019,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Network I/O",
"tooltip": {
@@ -1025,7 +1037,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "Bps",
"label": "",
@@ -1035,7 +1046,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -1043,7 +1053,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1082,12 +1096,10 @@
"renderer": "flot",
"seriesOverrides": [
{
"$$hashKey": "object:3641",
"alias": "received",
"color": "#f9d9f9"
},
{
"$$hashKey": "object:3649",
"alias": "transmited",
"color": "#f29191"
}
@@ -1097,7 +1109,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:2598",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
@@ -1105,7 +1116,6 @@
"refId": "A"
},
{
"$$hashKey": "object:3245",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
@@ -1115,6 +1125,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Network I/O",
"tooltip": {
@@ -1132,7 +1143,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1845",
"decimals": null,
"format": "Bps",
"label": "",
@@ -1142,7 +1152,6 @@
"show": true
},
{
"$$hashKey": "object:1846",
"format": "short",
"label": null,
"logBase": 1,
@@ -1150,7 +1159,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"content": "<div class=\"dashboard-header text-center\">\n<span>IN/OUTBOUND: $canary.$namespace</span>\n</div>",
@@ -1205,7 +1218,6 @@
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:1953",
"expr": "round(sum(irate(istio_requests_total{connection_security_policy=\"mutual_tls\", destination_workload_namespace=~\"$namespace\", destination_workload=~\"$primary\", reporter=\"destination\"}[30s])) by (source_workload, source_workload_namespace, response_code), 0.001)",
"format": "time_series",
"hide": false,
@@ -1215,7 +1227,6 @@
"step": 2
},
{
"$$hashKey": "object:1954",
"expr": "round(sum(irate(istio_requests_total{connection_security_policy!=\"mutual_tls\", destination_workload_namespace=~\"$namespace\", destination_workload=~\"$primary\", reporter=\"destination\"}[30s])) by (source_workload, source_workload_namespace, response_code), 0.001)",
"format": "time_series",
"hide": false,
@@ -1227,6 +1238,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Incoming Requests by Source And Response Code",
"tooltip": {
@@ -1246,7 +1258,6 @@
},
"yaxes": [
{
"$$hashKey": "object:1999",
"format": "ops",
"label": null,
"logBase": 1,
@@ -1255,7 +1266,6 @@
"show": true
},
{
"$$hashKey": "object:2000",
"format": "short",
"label": null,
"logBase": 1,
@@ -1263,7 +1273,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1323,6 +1337,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Incoming Requests by Source And Response Code",
"tooltip": {
@@ -1357,7 +1372,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1416,6 +1435,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Primary: Outgoing Requests by Destination And Response Code",
"tooltip": {
@@ -1450,7 +1470,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
@@ -1509,6 +1533,7 @@
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Canary: Outgoing Requests by Destination And Response Code",
"tooltip": {
@@ -1543,7 +1568,11 @@
"min": null,
"show": false
}
]
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "10s",
@@ -1554,11 +1583,9 @@
"list": [
{
"allValue": null,
"current": {
"text": "demo",
"value": "demo"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Namespace",
@@ -1568,6 +1595,7 @@
"query": "query_result(sum(istio_requests_total) by (destination_workload_namespace) or sum(istio_tcp_sent_bytes_total) by (destination_workload_namespace))",
"refresh": 1,
"regex": "/.*_namespace=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
@@ -1577,11 +1605,9 @@
},
{
"allValue": null,
"current": {
"text": "primary",
"value": "primary"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Primary",
@@ -1591,6 +1617,7 @@
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
@@ -1600,11 +1627,9 @@
},
{
"allValue": null,
"current": {
"text": "canary",
"value": "canary"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
"includeAll": false,
"label": "Canary",
@@ -1614,6 +1639,7 @@
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
@@ -1653,7 +1679,7 @@
]
},
"timezone": "",
"title": "Canary analysis",
"uid": "RdykD7tiz",
"version": 2
}
"title": "Istio Canary",
"uid": "flagger-istio",
"version": 3
}

@@ -1,15 +1,7 @@
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch its status by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
1. Run the port forward command:

  kubectl -n {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }} 3000:80

2. Navigate to:

  http://localhost:3000

@@ -38,12 +38,21 @@ spec:
          #   path: /
          #   port: http
          env:
            - name: GF_PATHS_PROVISIONING
              value: /etc/grafana/provisioning/
            {{- if .Values.password }}
            - name: GF_SECURITY_ADMIN_USER
              value: {{ .Values.user }}
            - name: GF_SECURITY_ADMIN_PASSWORD
              value: {{ .Values.password }}
            - name: GF_PATHS_PROVISIONING
              value: /etc/grafana/provisioning/
            {{- else }}
            - name: GF_AUTH_BASIC_ENABLED
              value: "false"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
              value: Admin
            {{- end }}
          volumeMounts:
            - name: grafana
              mountPath: /var/lib/grafana

@@ -6,7 +6,7 @@ replicaCount: 1

image:
  repository: grafana/grafana
  tag: 5.4.2
  tag: 5.4.3
  pullPolicy: IfNotPresent

service:

@@ -28,7 +28,7 @@ tolerations: []
affinity: {}

user: admin
password: admin
password:

# Istio Prometheus instance
url: http://prometheus:9090
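
With the password left empty, the deployment template above falls back to anonymous admin access. A hedged override to restore basic auth; the password value is an illustrative placeholder:

```yaml
# illustrative override, not part of this diff
user: admin
password: "change-me"   # any non-empty value re-enables the GF_SECURITY_ADMIN_* env vars
```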

22  charts/loadtester/.helmignore  Normal file
@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

21  charts/loadtester/Chart.yaml  Normal file
@@ -0,0 +1,21 @@
apiVersion: v1
name: loadtester
version: 0.2.0
appVersion: 0.2.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service, based on rakyll/hey, that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
  - https://github.com/weaveworks/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - canary
  - istio
  - appmesh
  - gitops
  - load testing

78  charts/loadtester/README.md  Normal file
@@ -0,0 +1,78 @@
# Flagger load testing service

[Flagger's](https://github.com/weaveworks/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-loadtester`:

```console
helm upgrade -i flagger-loadtester flagger/loadtester
```

The command deploys the load tester on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should have Istio sidecar injection enabled

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-loadtester` deployment:

```console
helm delete --purge flagger-loadtester
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the load tester chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | Memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `node`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level, one of debug, info, warning, error or panic | `info`
`meshName` | AWS App Mesh name | `none`
`backends` | AWS App Mesh virtual services | `none`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/loadtester --name flagger-loadtester
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
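
The README describes webhook usage only in prose; a hedged sketch of pointing a Canary webhook at this service, where the resource names are assumptions and the `cmd` metadata syntax matches the podinfo chart that appears later in this diff:

```yaml
# illustrative webhook entry, not part of this diff
webhooks:
  - name: load-test
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"   # assumed target service
```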

1  charts/loadtester/templates/NOTES.txt  Normal file
@@ -0,0 +1 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/

32  charts/loadtester/templates/_helpers.tpl  Normal file
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

67  charts/loadtester/templates/deployment.yaml  Normal file
@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "loadtester.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "loadtester.name" . }}
      annotations:
        appmesh.k8s.aws/ports: "444"
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./loadtester
            - -port=8080
            - -log-level={{ .Values.logLevel }}
            - -timeout={{ .Values.cmd.timeout }}
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

18  charts/loadtester/templates/service.yaml  Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ include "loadtester.name" . }}

27  charts/loadtester/templates/virtual-node.yaml  Normal file
@@ -0,0 +1,27 @@
{{- if .Values.meshName }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  meshName: {{ .Values.meshName }}
  listeners:
    - portMapping:
        port: 444
        protocol: http
  serviceDiscovery:
    dns:
      hostName: {{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}
  {{- if .Values.backends }}
  backends:
  {{- range .Values.backends }}
    - virtualService:
        virtualServiceName: {{ . }}
  {{- end }}
  {{- end }}
{{- end }}

34  charts/loadtester/values.yaml  Normal file
@@ -0,0 +1,34 @@
replicaCount: 1

image:
  repository: quay.io/weaveworks/flagger-loadtester
  tag: 0.2.0
  pullPolicy: IfNotPresent

logLevel: info
cmd:
  timeout: 1h

nameOverride: ""
fullnameOverride: ""

service:
  type: ClusterIP
  port: 80

resources:
  requests:
    cpu: 10m
    memory: 64Mi

nodeSelector: {}

tolerations: []

affinity: {}

# App Mesh virtual node settings
meshName: ""
#backends:
#  - app1.namespace
#  - app2.namespace
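
A hedged sketch of enabling the chart's App Mesh virtual node; the mesh name and backend virtual services are illustrative assumptions:

```yaml
# illustrative override, not part of this diff
meshName: global            # assumed mesh name
backends:
  - podinfo.test            # assumed virtual services the tester needs to reach
  - podinfo-canary.test
```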

21  charts/podinfo/.helmignore  Normal file
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

12  charts/podinfo/Chart.yaml  Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
version: 2.0.0
appVersion: 1.4.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://github.com/weaveworks/flagger
maintainers:
  - email: stefanprodan@users.noreply.github.com
    name: stefanprodan
sources:
  - https://github.com/weaveworks/flagger

79  charts/podinfo/README.md  Normal file
@@ -0,0 +1,79 @@
# Podinfo

Podinfo is a tiny web application made with Go
that showcases best practices of running canary deployments with Flagger and Istio.

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `frontend`:

```console
helm upgrade -i frontend flagger/podinfo \
--namespace test \
--set nameOverride=frontend \
--set backend=http://backend.test:9898/echo \
--set canary.enabled=true \
--set canary.istioIngress.enabled=true \
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
--set canary.istioIngress.host=frontend.istio.example.com
```

To install the chart as `backend`:

```console
helm upgrade -i backend flagger/podinfo \
--namespace test \
--set nameOverride=backend \
--set canary.enabled=true
```

## Uninstalling the Chart

To uninstall/delete the `frontend` deployment:

```console
$ helm delete --purge frontend
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the podinfo chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/podinfo`
`image.tag` | Image tag | `<VERSION>`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`hpa.enabled` | Enables HPA | `true`
`hpa.cpu` | Target CPU usage per pod | `80`
`hpa.memory` | Target memory usage per pod | `512Mi`
`hpa.minReplicas` | Minimum pod replicas | `2`
`hpa.maxReplicas` | Maximum pod replicas | `4`
`resources.requests/cpu` | Pod CPU request | `1m`
`resources.requests/memory` | Pod memory request | `16Mi`
`backend` | Backend URL | None
`faults.delay` | Random HTTP response delays between 0 and 5 seconds | `false`
`faults.error` | 1 in 3 chance of a random HTTP response error | `false`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
$ helm install flagger/podinfo --name frontend \
--set=image.tag=1.4.1,hpa.enabled=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install flagger/podinfo --name frontend -f values.yaml
```

1  charts/podinfo/templates/NOTES.txt  Normal file
@@ -0,0 +1 @@
podinfo {{ .Release.Name }} deployed!

43  charts/podinfo/templates/_helpers.tpl  Normal file
@@ -0,0 +1,43 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "podinfo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "podinfo.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "podinfo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name suffix.
*/}}
{{- define "podinfo.suffix" -}}
{{- if .Values.canary.enabled -}}
{{- "-primary" -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}

54  charts/podinfo/templates/canary.yaml  Normal file
@@ -0,0 +1,54 @@
{{- if .Values.canary.enabled }}
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  progressDeadlineSeconds: 60
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: {{ template "podinfo.fullname" . }}
  service:
    port: {{ .Values.service.port }}
    {{- if .Values.canary.istioIngress.enabled }}
    gateways:
      - {{ .Values.canary.istioIngress.gateway }}
    hosts:
      - {{ .Values.canary.istioIngress.host }}
    {{- end }}
  canaryAnalysis:
    interval: {{ .Values.canary.analysis.interval }}
    threshold: {{ .Values.canary.analysis.threshold }}
    maxWeight: {{ .Values.canary.analysis.maxWeight }}
    stepWeight: {{ .Values.canary.analysis.stepWeight }}
    metrics:
      - name: istio_requests_total
        threshold: {{ .Values.canary.thresholds.successRate }}
        interval: 1m
      - name: istio_request_duration_seconds_bucket
        threshold: {{ .Values.canary.thresholds.latency }}
        interval: 1m
    {{- if .Values.canary.loadtest.enabled }}
    webhooks:
      - name: load-test-get
        url: {{ .Values.canary.loadtest.url }}
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 5 -c 2 http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}"
      - name: load-test-post
        url: {{ .Values.canary.loadtest.url }}
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 5 -c 2 -m POST -d '{\"test\": true}' http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}/echo"
    {{- end }}
{{- end }}

15  charts/podinfo/templates/configmap.yaml  Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
  config.yaml: |-
    # http settings
    http-client-timeout: 1m
    http-server-timeout: {{ .Values.httpServer.timeout }}
    http-server-shutdown-timeout: 5s

93  charts/podinfo/templates/deployment.yaml  Normal file
@@ -0,0 +1,93 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app: {{ template "podinfo.fullname" . }}
  template:
    metadata:
      labels:
        app: {{ template "podinfo.fullname" . }}
      annotations:
        prometheus.io/scrape: 'true'
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - ./podinfo
            - --port={{ .Values.service.port }}
            - --level={{ .Values.logLevel }}
            - --random-delay={{ .Values.faults.delay }}
            - --random-error={{ .Values.faults.error }}
            - --config-path=/podinfo/config
          env:
          {{- if .Values.message }}
            - name: PODINFO_UI_MESSAGE
              value: {{ .Values.message }}
          {{- end }}
          {{- if .Values.backend }}
            - name: PODINFO_BACKEND_URL
              value: {{ .Values.backend }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          volumeMounts:
            - name: data
              mountPath: /data
            - name: config
              mountPath: /podinfo/config
              readOnly: true
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      volumes:
        - name: data
          emptyDir: {}
        - name: config
          configMap:
            name: {{ template "podinfo.fullname" . }}

37  charts/podinfo/templates/hpa.yaml  Normal file
@@ -0,0 +1,37 @@
{{- if .Values.hpa.enabled -}}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1beta2
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  minReplicas: {{ .Values.hpa.minReplicas }}
  maxReplicas: {{ .Values.hpa.maxReplicas }}
  metrics:
  {{- if .Values.hpa.cpu }}
    - type: Resource
      resource:
        name: cpu
        targetAverageUtilization: {{ .Values.hpa.cpu }}
  {{- end }}
  {{- if .Values.hpa.memory }}
    - type: Resource
      resource:
        name: memory
        targetAverageValue: {{ .Values.hpa.memory }}
  {{- end }}
  {{- if .Values.hpa.requests }}
    - type: Pod
      pods:
        metricName: http_requests
        targetAverageValue: {{ .Values.hpa.requests }}
  {{- end }}
{{- end }}

20  charts/podinfo/templates/service.yaml  Normal file
@@ -0,0 +1,20 @@
{{- if not .Values.canary.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "podinfo.fullname" . }}
{{- end }}

22  charts/podinfo/templates/tests/test-config.yaml  Executable file
@@ -0,0 +1,22 @@
{{- $url := printf "%s%s.%s:%v" (include "podinfo.fullname" .) (include "podinfo.suffix" .) .Release.Namespace .Values.service.port -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}-tests
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
data:
  run.sh: |-
    @test "HTTP POST /echo" {
      run curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/echo
      [ $output = "test" ]
    }
    @test "HTTP POST /store" {
      curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/store
    }
    @test "HTTP GET /" {
      curl --retry 3 --connect-timeout 2 -sS {{ $url }} | grep hostname
    }

43  charts/podinfo/templates/tests/test-pod.yaml  Executable file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "podinfo.fullname" . }}-tests-{{ randAlphaNum 5 | lower }}
  annotations:
    "helm.sh/hook": test-success
    sidecar.istio.io/inject: "false"
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
spec:
  initContainers:
    - name: "test-framework"
      image: "dduportal/bats:0.4.0"
      command:
        - "bash"
        - "-c"
        - |
          set -ex
          # copy bats to tools dir
          cp -R /usr/local/libexec/ /tools/bats/
      volumeMounts:
        - mountPath: /tools
          name: tools
  containers:
    - name: {{ .Release.Name }}-ui-test
      image: dduportal/bats:0.4.0
      command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
      volumeMounts:
        - mountPath: /tests
          name: tests
          readOnly: true
        - mountPath: /tools
          name: tools
  volumes:
    - name: tests
      configMap:
        name: {{ template "podinfo.fullname" . }}-tests
    - name: tools
      emptyDir: {}
  restartPolicy: Never

73  charts/podinfo/values.yaml  Normal file
@@ -0,0 +1,73 @@
# Default values for podinfo.
image:
  repository: quay.io/stefanprodan/podinfo
  tag: 1.4.0
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 9898

hpa:
  enabled: true
  minReplicas: 2
  maxReplicas: 2
  cpu: 80
  memory: 512Mi

canary:
  enabled: true
  istioIngress:
    enabled: false
    # Istio ingress gateway name
    gateway: public-gateway.istio-system.svc.cluster.local
    # external host name eg. podinfo.example.com
    host:
  analysis:
    # schedule interval (default 60s)
    interval: 15s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
  thresholds:
    # minimum req success rate (non 5xx responses)
    # percentage (0-100)
    successRate: 99
    # maximum req duration P99
    # milliseconds
    latency: 500
  loadtest:
    enabled: false
    # load tester address
    url: http://flagger-loadtester.test/

resources:
  limits:
  requests:
    cpu: 100m
    memory: 32Mi

nodeSelector: {}

tolerations: []

affinity: {}

nameOverride: ""
fullnameOverride: ""

logLevel: info
backend: #http://backend-podinfo:9898/echo
message: #UI greetings

faults:
  delay: false
  error: false

httpServer:
  timeout: 30s
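
A hedged example of overriding these defaults to expose the canary through the Istio gateway and enable the load-test webhooks from the template above; the external host name is an illustrative assumption:

```yaml
# illustrative override, not part of this diff
canary:
  istioIngress:
    enabled: true
    gateway: public-gateway.istio-system.svc.cluster.local
    host: podinfo.example.com   # assumed external host
  loadtest:
    enabled: true
    url: http://flagger-loadtester.test/
```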
@@ -2,23 +2,24 @@ package main

 import (
 	"flag"
-	"log"
-	"time"
-
-	_ "github.com/istio/glog"
-	istioclientset "github.com/knative/pkg/client/clientset/versioned"
-	"github.com/knative/pkg/signals"
-	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
-	informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
-	"github.com/stefanprodan/flagger/pkg/controller"
-	"github.com/stefanprodan/flagger/pkg/logging"
-	"github.com/stefanprodan/flagger/pkg/notifier"
-	"github.com/stefanprodan/flagger/pkg/server"
-	"github.com/stefanprodan/flagger/pkg/version"
+	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
+	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
+	"github.com/weaveworks/flagger/pkg/controller"
+	"github.com/weaveworks/flagger/pkg/logger"
+	"github.com/weaveworks/flagger/pkg/metrics"
+	"github.com/weaveworks/flagger/pkg/notifier"
+	"github.com/weaveworks/flagger/pkg/server"
+	"github.com/weaveworks/flagger/pkg/signals"
+	"github.com/weaveworks/flagger/pkg/version"
 	"go.uber.org/zap"
 	"k8s.io/client-go/kubernetes"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
+	"log"
+	"strings"
+	"time"
 )

 var (
@@ -31,6 +32,12 @@ var (
 	slackURL     string
 	slackUser    string
 	slackChannel string
+	threadiness       int
+	zapReplaceGlobals bool
+	zapEncoding       string
+	namespace         string
+	meshProvider      string
+	selectorLabels    string
 )

 func init() {
@@ -43,15 +50,25 @@ func init() {
 	flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
 	flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
 	flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
+	flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
+	flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
+	flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
+	flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
+	flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio or appmesh")
+	flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors")
 }

 func main() {
 	flag.Parse()

-	logger, err := logging.NewLogger(logLevel)
+	logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
 	if err != nil {
 		log.Fatalf("Error creating logger: %v", err)
 	}
+	if zapReplaceGlobals {
+		zap.ReplaceGlobals(logger.Desugar())
+	}

 	defer logger.Sync()

 	stopCh := signals.SetupSignalHandler()
@@ -66,7 +83,7 @@ func main() {
 		logger.Fatalf("Error building kubernetes clientset: %v", err)
 	}

-	istioClient, err := istioclientset.NewForConfig(cfg)
+	meshClient, err := clientset.NewForConfig(cfg)
 	if err != nil {
 		logger.Fatalf("Error building istio clientset: %v", err)
 	}
@@ -76,7 +93,8 @@ func main() {
 		logger.Fatalf("Error building example clientset: %s", err.Error())
 	}

-	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
+	flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
+
 	canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

 	logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
@@ -86,9 +104,17 @@ func main() {
 		logger.Fatalf("Error calling Kubernetes API: %v", err)
 	}

-	logger.Infof("Connected to Kubernetes API %s", ver)
+	labels := strings.Split(selectorLabels, ",")
+	if len(labels) < 1 {
+		logger.Fatalf("At least one selector label is required")
+	}

-	ok, err := controller.CheckMetricsServer(metricsServer)
+	logger.Infof("Connected to Kubernetes API %s", ver)
+	if namespace != "" {
+		logger.Infof("Watching namespace %s", namespace)
+	}
+
+	ok, err := metrics.CheckMetricsServer(metricsServer)
 	if ok {
 		logger.Infof("Connected to metrics server %s", metricsServer)
 	} else {
@@ -110,13 +136,16 @@ func main() {

 	c := controller.NewController(
 		kubeClient,
-		istioClient,
+		meshClient,
 		flaggerClient,
 		canaryInformer,
 		controlLoopInterval,
 		metricsServer,
 		logger,
 		slack,
+		meshProvider,
+		version.VERSION,
+		labels,
 	)

 	flaggerInformerFactory.Start(stopCh)
@@ -132,7 +161,7 @@ func main() {

 	// start controller
 	go func(ctrl *controller.Controller) {
-		if err := ctrl.Run(2, stopCh); err != nil {
+		if err := ctrl.Run(threadiness, stopCh); err != nil {
 			logger.Fatalf("Error running controller: %v", err)
 		}
 	}(c)
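The new -threadiness flag replaces the worker count that was hard-coded to 2 in ctrl.Run. In the usual Kubernetes controller pattern this is simply the number of goroutines draining a shared work queue. A minimal, self-contained sketch of that fan-out (illustrative only, not the flagger controller's actual queue):

package main

import (
	"fmt"
	"sync"
)

// run launches `threadiness` workers that drain a shared queue, the
// same fan-out that ctrl.Run(threadiness, stopCh) performs over the
// controller's informer-fed work queue. Illustrative sketch only.
func run(threadiness int, queue <-chan string) {
	var wg sync.WaitGroup
	for i := 0; i < threadiness; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for item := range queue {
				fmt.Printf("worker %d reconciling %s\n", id, item)
			}
		}(i)
	}
	wg.Wait()
}

func main() {
	queue := make(chan string, 3)
	queue <- "test/podinfo"
	queue <- "prod/frontend"
	queue <- "prod/backend"
	close(queue)
	run(2, queue) // default threadiness
}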
cmd/loadtester/main.go (new file, 51 lines)
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"flag"
+	"github.com/weaveworks/flagger/pkg/loadtester"
+	"github.com/weaveworks/flagger/pkg/logger"
+	"github.com/weaveworks/flagger/pkg/signals"
+	"go.uber.org/zap"
+	"log"
+	"time"
+)
+
+var VERSION = "0.2.0"
+var (
+	logLevel          string
+	port              string
+	timeout           time.Duration
+	zapReplaceGlobals bool
+	zapEncoding       string
+)
+
+func init() {
+	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
+	flag.StringVar(&port, "port", "9090", "Port to listen on.")
+	flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
+	flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
+	flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
+}
+
+func main() {
+	flag.Parse()
+
+	logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
+	if err != nil {
+		log.Fatalf("Error creating logger: %v", err)
+	}
+	if zapReplaceGlobals {
+		zap.ReplaceGlobals(logger.Desugar())
+	}
+
+	defer logger.Sync()
+
+	stopCh := signals.SetupSignalHandler()
+
+	taskRunner := loadtester.NewTaskRunner(logger, timeout)
+
+	go taskRunner.Start(100*time.Millisecond, stopCh)
+
+	logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
+	loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
+}
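The load tester starts a TaskRunner on a 100ms tick and, per the -timeout flag ("Load test exec timeout"), bounds each load-test command it executes. A minimal sketch of such a timeout-bounded exec, assuming the runner shells out to external load-generation tools (hypothetical helper, not the actual flagger/pkg/loadtester API):

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout mirrors the idea behind the loadtester's exec timeout:
// the spawned command is killed when the deadline expires. Hypothetical
// helper for illustration, not part of the flagger codebase.
func runWithTimeout(cmd string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return exec.CommandContext(ctx, "sh", "-c", cmd).Run()
}

func main() {
	// The command outlives the 1s deadline, so Run returns an error.
	if err := runWithTimeout("sleep 2", time.Second); err != nil {
		fmt.Println("load test aborted:", err)
	}
}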
@@ -1 +0,0 @@
-flagger.app
@@ -1,11 +0,0 @@
-# Flagger
-
-Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic
-shifting and Prometheus metrics for canary analysis.
-
-Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
-indicators like HTTP requests success rate, requests average duration and pods health. Based on the KPIs analysis
-a canary is promoted or aborted and the analysis result is published to Slack.
-
-### For the install instructions and usage examples please see [docs.flagger.app](https://docs.flagger.app)
-
@@ -1,55 +0,0 @@
-title: Flagger - Istio Progressive Delivery Kubernetes Operator
-
-remote_theme: errordeveloper/simple-project-homepage
-repository: stefanprodan/flagger
-by_weaveworks: true
-
-url: "https://flagger.app"
-baseurl: "/"
-
-twitter:
-  username: "stefanprodan"
-author:
-  twitter: "stefanprodan"
-
-# Set default og:image
-defaults:
-  - scope: {path: ""}
-    values: {image: "diagrams/flagger-overview.png"}
-
-# See: https://material.io/guidelines/style/color.html
-# Use color-name-value, like pink-200 or deep-purple-100
-brand_color: "amber-400"
-
-# How article URLs are structured.
-# See: https://jekyllrb.com/docs/permalinks/
-permalink: posts/:title/
-
-# "UA-NNNNNNNN-N"
-google_analytics: ""
-
-# Language. For example, if you write in Japanese, use "ja"
-lang: "en"
-
-# How many posts are visible on the home page without clicking "View More"
-num_posts_visible_initially: 5
-
-# Date format: See http://strftime.net/
-date_format: "%b %-d, %Y"
-
-plugins:
-  - jekyll-feed
-  - jekyll-readme-index
-  - jekyll-seo-tag
-  - jekyll-sitemap
-  - jemoji
-# # required for local builds with starefossen/github-pages
-#   - jekyll-github-metadata
-#   - jekyll-mentions
-#   - jekyll-redirect-from
-#   - jekyll-remote-theme
-
-exclude:
-  - CNAME
-  - gitbook
BIN docs/diagrams/flagger-abtest-steps.png (new file; after: 158 KiB)
BIN (modified image, name not shown; before: 207 KiB, after: 46 KiB)
BIN docs/diagrams/flagger-flux-gitops.png (new file; after: 130 KiB)
BIN docs/diagrams/flagger-gitops-aws.png (new file; after: 32 KiB)
BIN docs/diagrams/flagger-gke-istio.png (new file; after: 196 KiB)
BIN docs/diagrams/flagger-load-testing.png (new file; after: 159 KiB)
BIN (modified image, name not shown; before: 183 KiB, after: 46 KiB)
BIN docs/diagrams/istio-cert-manager-gke.png (new file; after: 205 KiB)