Compare commits
317 Commits
@@ -1,54 +1,196 @@
version: 2.1
jobs:

build-binary:
docker:
- image: circleci/golang:1.12
working_directory: ~/build
steps:
- checkout
- restore_cache:
keys:
- go-mod-v3-{{ checksum "go.sum" }}
- run:
name: Run go fmt
command: make test-fmt
- run:
name: Build Flagger
command: |
CGO_ENABLED=0 GOOS=linux go build \
-ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=${CIRCLE_SHA1}" \
-a -installsuffix cgo -o bin/flagger ./cmd/flagger/*.go
- run:
name: Build Flagger load tester
command: |
CGO_ENABLED=0 GOOS=linux go build \
-a -installsuffix cgo -o bin/loadtester ./cmd/loadtester/*.go
- run:
name: Run unit tests
command: |
go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
bash <(curl -s https://codecov.io/bash)
- run:
name: Verify code gen
command: make test-codegen
- save_cache:
key: go-mod-v3-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod/"
- persist_to_workspace:
root: bin
paths:
- flagger
- loadtester

push-container:
docker:
- image: circleci/golang:1.12
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/container-push.sh

push-binary:
docker:
- image: circleci/golang:1.12
working_directory: ~/build
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- restore_cache:
keys:
- go-mod-v3-{{ checksum "go.sum" }}
- run: test/goreleaser.sh

e2e-istio-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-istio.sh
- run: test/e2e-build.sh
- run: test/e2e-tests.sh

e2e-smi-istio-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-smi-istio.sh
- run: test/e2e-tests.sh canary

e2e-supergloo-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh 0.2.1
- run: test/e2e-supergloo.sh
- run: test/e2e-build.sh supergloo:test.supergloo-system
- run: test/e2e-tests.sh canary

e2e-gloo-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-gloo.sh
- run: test/e2e-gloo-tests.sh

e2e-nginx-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-nginx.sh
- run: test/e2e-nginx-build.sh
- run: test/e2e-nginx-tests.sh

e2e-linkerd-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-linkerd.sh
- run: test/e2e-linkerd-tests.sh

workflows:
version: 2
build-and-test:
build-test-push:
jobs:
- build-binary:
filters:
branches:
ignore:
- gh-pages
- e2e-istio-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-supergloo-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
requires:
- build-binary
- e2e-smi-istio-testing:
requires:
- build-binary
# - e2e-supergloo-testing:
# requires:
# - build-binary
- e2e-gloo-testing:
requires:
- build-binary
- e2e-nginx-testing:
requires:
- build-binary
- e2e-linkerd-testing:
requires:
- build-binary
- push-container:
requires:
- build-binary
- e2e-istio-testing
- e2e-smi-istio-testing
#- e2e-supergloo-testing
- e2e-gloo-testing
- e2e-nginx-testing
- e2e-linkerd-testing

release:
jobs:
- build-binary:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
ignore: /.*/
tags:
ignore: /^chart.*/
- push-container:
requires:
- build-binary
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-binary:
requires:
- push-container
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
.gitignore (vendored, 2 changes)
@@ -13,5 +13,7 @@
.DS_Store

bin/
_tmp/

artifacts/gcloud/
.idea
@@ -8,7 +8,11 @@ builds:
- amd64
env:
- CGO_ENABLED=0
archive:
name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- none*
archives:
- name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- none*
changelog:
filters:
exclude:
- '^CircleCI'
.travis.yml (50 changes)
@@ -1,50 +0,0 @@
sudo: required
language: go

branches:
except:
- /gh-pages.*/
- /docs-.*/

go:
- 1.12.x

services:
- docker

addons:
apt:
packages:
- docker-ce

script:
- make test-fmt
- make test-codegen
- go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
- make build

after_success:
- if [ -z "$DOCKER_USER" ]; then
echo "PR build, skipping image push";
else
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
docker tag weaveworks/flagger:latest weaveworks/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push weaveworks/flagger:${BRANCH_COMMIT};
fi
- if [ -z "$TRAVIS_TAG" ]; then
echo "Not a release, skipping image push";
else
docker tag weaveworks/flagger:latest weaveworks/flagger:${TRAVIS_TAG};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push weaveworks/flagger:$TRAVIS_TAG;
fi
- bash <(curl -s https://codecov.io/bash)
- rm coverage.txt

deploy:
- provider: script
skip_cleanup: true
script: curl -sL http://git.io/goreleaser | bash
on:
tags: true
CHANGELOG.md (88 changes)
@@ -2,6 +2,94 @@

All notable changes to this project are documented in this file.

## 0.18.0 (2019-07-29)

Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis

#### Features

- Implement confirm rollout gate, hook and API [#251](https://github.com/weaveworks/flagger/pull/251)

#### Improvements

- Refactor canary change detection and status [#240](https://github.com/weaveworks/flagger/pull/240)
- Implement finalising state [#257](https://github.com/weaveworks/flagger/pull/257)
- Add gRPC load testing tool [#248](https://github.com/weaveworks/flagger/pull/248)

#### Breaking changes

- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), the canaries' status phase will be reset to `Initialized` when upgrading Flagger
- Upgrading Flagger with Helm will fail due to Helm's poor support of CRDs, see the [workaround](https://github.com/weaveworks/flagger/issues/223)

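For reference, a gated canary registers the new confirm-rollout hook ahead of its regular checks. This is only a sketch: the loadtester service name and the /gate/check path are assumptions taken from the manual gating docs linked above.

```yaml
  canaryAnalysis:
    webhooks:
      # assumed gate endpoint exposed by the load tester; the analysis pauses
      # here until the endpoint reports approval (per the linked docs,
      # /gate/approve opens the gate and lets the rollout resume)
      - name: confirm
        type: confirm-rollout
        url: http://flagger-loadtester.test/gate/check
```
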
## 0.17.0 (2019-07-08)

Adds support for Linkerd (SMI Traffic Split API), MS Teams notifications and HA mode with leader election

#### Features

- Add Linkerd support [#230](https://github.com/weaveworks/flagger/pull/230)
- Implement MS Teams notifications [#235](https://github.com/weaveworks/flagger/pull/235)
- Implement leader election [#236](https://github.com/weaveworks/flagger/pull/236)

#### Improvements

- Add [Kustomize](https://docs.flagger.app/install/flagger-install-on-kubernetes#install-flagger-with-kustomize) installer [#232](https://github.com/weaveworks/flagger/pull/232)
- Add Pod Security Policy to Helm chart [#234](https://github.com/weaveworks/flagger/pull/234)

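Because the Linkerd integration is driven through the SMI TrafficSplit API, pointing an existing canary at it is a one-field change; a minimal sketch (the podinfo names are illustrative):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # route traffic through Linkerd instead of Istio
  provider: linkerd
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
```
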
## 0.16.0 (2019-06-23)

Adds support for running [Blue/Green deployments](https://docs.flagger.app/usage/blue-green) without a service mesh or ingress controller

#### Features

- Allow blue/green deployments without a service mesh provider [#211](https://github.com/weaveworks/flagger/pull/211)
- Add the service mesh provider to the canary spec [#217](https://github.com/weaveworks/flagger/pull/217)
- Allow multi-port services and implement port discovery [#207](https://github.com/weaveworks/flagger/pull/207)

#### Improvements

- Add [FAQ page](https://docs.flagger.app/faq) to docs website
- Switch to go modules in CI [#218](https://github.com/weaveworks/flagger/pull/218)
- Update e2e testing to Kubernetes Kind 0.3.0 and Istio 1.2.0

#### Fixes

- Update the primary HPA on canary promotion [#216](https://github.com/weaveworks/flagger/pull/216)

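On a cluster without a mesh or ingress controller, Blue/Green promotion uses the kubernetes provider and iteration-based checks instead of traffic weights. A rough sketch assuming the fields introduced in [#211](https://github.com/weaveworks/flagger/pull/211) and [#207](https://github.com/weaveworks/flagger/pull/207); the names and numbers are illustrative:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # plain Kubernetes ClusterIP services, no traffic shifting
  provider: kubernetes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
    # add the other container ports to the generated ClusterIP services
    portDiscovery: true
  canaryAnalysis:
    interval: 1m
    # number of checks to run against the canary before promotion
    iterations: 10
    threshold: 2
```
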
## 0.15.0 (2019-06-12)

Adds support for customising the Istio [traffic policy](https://docs.flagger.app/how-it-works#istio-routing) in the canary service spec

#### Features

- Generate Istio destination rules and allow traffic policy customisation [#200](https://github.com/weaveworks/flagger/pull/200)

#### Improvements

- Update Kubernetes packages to 1.14 and use go modules instead of dep [#202](https://github.com/weaveworks/flagger/pull/202)

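The generated destination rules pick up their TLS settings from the canary service spec; a fragment mirroring the podinfo example that appears later in this compare:

```yaml
  service:
    port: 9898
    # Istio traffic policy applied to the generated DestinationRules
    trafficPolicy:
      tls:
        # use ISTIO_MUTUAL when mTLS is enabled
        mode: DISABLE
```
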
## 0.14.1 (2019-06-05)

Adds support for running [acceptance/integration tests](https://docs.flagger.app/how-it-works#integration-testing) with Helm test or Bash Bats using pre-rollout hooks

#### Features

- Implement Helm and Bash pre-rollout hooks [#196](https://github.com/weaveworks/flagger/pull/196)

#### Fixes

- Fix promoting canary when max weight is not a multiple of step [#190](https://github.com/weaveworks/flagger/pull/190)
- Add ability to set Prometheus url with custom path without trailing '/' [#197](https://github.com/weaveworks/flagger/pull/197)

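Pre-rollout hooks run before any traffic is shifted and block the rollout if they fail. A sketch that reuses the load tester's generic cmd runner; the Helm release name and the tester URL are illustrative, and Helm is available in the tester image per the Dockerfile change in this compare:

```yaml
  canaryAnalysis:
    webhooks:
      - name: smoke-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 3m
        metadata:
          type: cmd
          # run the chart's Helm tests against the canary before shifting traffic
          cmd: "helm test podinfo --cleanup"
          logCmdOutput: "true"
```
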
## 0.14.0 (2019-05-21)

Adds support for Service Mesh Interface and [Gloo](https://docs.flagger.app/usage/gloo-progressive-delivery) ingress controller

#### Features

- Add support for SMI (Istio weighted traffic) [#180](https://github.com/weaveworks/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/weaveworks/flagger/pull/179)

## 0.13.2 (2019-04-11)

Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)

Dockerfile (15 changes)
@@ -1,16 +1,3 @@
FROM golang:1.12

RUN mkdir -p /go/src/github.com/weaveworks/flagger/

WORKDIR /go/src/github.com/weaveworks/flagger

COPY . .

RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
-X github.com/weaveworks/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-a -installsuffix cgo -o flagger ./cmd/flagger/*

FROM alpine:3.9

RUN addgroup -S flagger \
@@ -19,7 +6,7 @@ RUN addgroup -S flagger \

WORKDIR /home/flagger

COPY --from=0 /go/src/github.com/weaveworks/flagger/flagger .
COPY /bin/flagger .

RUN chown -R flagger:flagger ./

@@ -1,15 +1,3 @@
FROM golang:1.12 AS builder

RUN mkdir -p /go/src/github.com/weaveworks/flagger/

WORKDIR /go/src/github.com/weaveworks/flagger

COPY . .

RUN go test -race ./pkg/loadtester/

RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*

FROM bats/bats:v1.1.0

RUN addgroup -S app \
@@ -21,10 +9,19 @@ WORKDIR /home/app
RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
chmod +x hey && mv hey /usr/local/bin/hey

COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester .
RUN curl -sSL "https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
rm -rf linux-amd64

RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web

RUN ls /tmp

COPY ./bin/loadtester .

RUN chown -R app:app ./

USER app

ENTRYPOINT ["./loadtester"]
ENTRYPOINT ["./loadtester"]

Gopkg.lock (generated, 1195 changes)
Gopkg.toml (74 changes)
@@ -1,74 +0,0 @@
required = [
"k8s.io/apimachinery/pkg/util/sets/types",
"k8s.io/code-generator/cmd/deepcopy-gen",
"k8s.io/code-generator/cmd/defaulter-gen",
"k8s.io/code-generator/cmd/client-gen",
"k8s.io/code-generator/cmd/lister-gen",
"k8s.io/code-generator/cmd/informer-gen",
]

[[constraint]]
name = "go.uber.org/zap"
version = "v1.9.1"

[[constraint]]
name = "gopkg.in/h2non/gock.v1"
version = "v1.0.14"

[[override]]
name = "gopkg.in/yaml.v2"
version = "v2.2.1"

[[override]]
name = "k8s.io/api"
version = "kubernetes-1.13.1"

[[override]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.13.1"

[[override]]
name = "k8s.io/code-generator"
version = "kubernetes-1.13.1"

[[override]]
name = "k8s.io/client-go"
version = "kubernetes-1.13.1"

[[override]]
name = "k8s.io/apiextensions-apiserver"
version = "kubernetes-1.13.1"

[[override]]
name = "k8s.io/apiserver"
version = "kubernetes-1.13.1"

[[constraint]]
name = "github.com/prometheus/client_golang"
version = "v0.8.0"

[[constraint]]
name = "github.com/google/go-cmp"
version = "v0.2.0"

[[override]]
name = "k8s.io/klog"
source = "github.com/stefanprodan/klog"

[prune]
go-tests = true
unused-packages = true
non-go = true

[[prune.project]]
name = "k8s.io/code-generator"
unused-packages = false
non-go = false

[[constraint]]
name = "github.com/solo-io/supergloo"
version = "v0.3.11"

[[constraint]]
name = "github.com/solo-io/solo-kit"
version = "v0.6.3"
Makefile (55 changes)
@@ -2,29 +2,47 @@ TAG?=latest
VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/canary pkg/metrics pkg/router pkg/notifier
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
TS=$(shell date +%Y-%m-%d_%H-%M-%S)

run:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-metrics-server=https://prometheus.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
-enable-leader-election=true

run2:
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-metrics-server=https://prometheus.istio.weavedx.com \
-enable-leader-election=true \
-port=9092

run-appmesh:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090 \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090

run-nginx:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
-metrics-server=http://prometheus-weave.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
-metrics-server=http://prometheus-weave.istio.weavedx.com

run-smi:
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:istio -namespace=smi \
-metrics-server=https://prometheus.istio.weavedx.com

run-gloo:
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=gloo -namespace=gloo \
-metrics-server=https://prometheus.istio.weavedx.com

run-nop:
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=none -namespace=bg \
-metrics-server=https://prometheus.istio.weavedx.com

run-linkerd:
GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:linkerd -namespace=demo \
-metrics-server=https://linkerd-prometheus.istio.weavedx.com

build:
GIT_COMMIT=$$(git rev-list -1 HEAD) && GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=$${GIT_COMMIT}" -a -installsuffix cgo -o ./bin/flagger ./cmd/flagger/*
docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile

push:
@@ -60,7 +78,8 @@ version-set:
sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
echo "Version $$next set in code, deployment and charts"
sed -i '' "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
echo "Version $$next set in code, deployment, chart and kustomize"

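The extra sed target assumes the kustomize base pins the image tag through an images transformer; a sketch of what kustomize/base/flagger/kustomization.yaml would then contain (the resource file names are assumptions, only the weaveworks/flagger image name and the newTag field are implied by the Makefile):

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # assumed manifest names, not taken from this compare
  - rbac.yaml
  - deployment.yaml
images:
  - name: weaveworks/flagger
    # kept in sync by the version-set target above
    newTag: 0.18.0
```
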
version-up:
@next="$(VERSION_MINOR).$$(($(PATCH) + 1))" && \
@@ -94,6 +113,14 @@ reset-test:
kubectl apply -f ./artifacts/namespaces
kubectl apply -f ./artifacts/canaries

loadtester-run: loadtester-build
docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker rm -f tester || true
docker run -dp 8888:9090 --name tester weaveworks/flagger-loadtester:$(LT_VERSION)

loadtester-build:
GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./bin/loadtester ./cmd/loadtester/*

loadtester-push:
docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push weaveworks/flagger-loadtester:$(LT_VERSION)
docker push weaveworks/flagger-loadtester:$(LT_VERSION)

README.md (46 changes)
@@ -1,19 +1,19 @@
# flagger

[Travis CI build](https://travis-ci.org/weaveworks/flagger)
[CircleCI build](https://circleci.com/gh/weaveworks/flagger)
[Go Report Card](https://goreportcard.com/report/github.com/weaveworks/flagger)
[Codecov](https://codecov.io/gh/weaveworks/flagger)
[License](https://github.com/weaveworks/flagger/blob/master/LICENSE)
[Release](https://github.com/weaveworks/flagger/releases)

Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
load tests or any other custom validation.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP request success rate, average request duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.



@@ -35,11 +35,16 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* [Manual gating](https://docs.flagger.app/how-it-works#manual-gating)
* [FAQ](https://docs.flagger.app/faq)
* Usage
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
* [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
* [Blue/Green deployments](https://docs.flagger.app/usage/blue-green)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
* Tutorials
@@ -63,6 +68,10 @@ metadata:
name: podinfo
namespace: test
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
# use the kubernetes provider for Blue/Green style deployments
provider: istio
# deployment reference
targetRef:
apiVersion: apps/v1
@@ -82,7 +91,6 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
@@ -93,17 +101,12 @@ spec:
# HTTP rewrite (optional)
rewrite:
uri: /
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
# request timeout (optional)
timeout: 5s
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs
@@ -154,20 +157,19 @@ For more details on how the canary analysis and promotion works please [read the

## Features

| Feature | Istio | App Mesh | SuperGloo | NGINX Ingress |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies filters) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
| Load testing | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (custom acceptance tests) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Ingress gateway (CORS, retries and timeouts) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies filters) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |

## Roadmap

* Integrate with other service mesh technologies like Linkerd v2
* Integrate with other ingress controllers like Contour, HAProxy, ALB
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two

## Contributing

@@ -25,7 +25,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
image: quay.io/stefanprodan/podinfo:1.7.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
|
||||
@@ -25,7 +25,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
image: quay.io/stefanprodan/podinfo:1.7.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider (default istio)
|
||||
# can be: kubernetes, istio, appmesh, smi, nginx, gloo, supergloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: istio
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
@@ -20,13 +24,25 @@ spec:
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# port name can be http or grpc (default http)
|
||||
portName: http
|
||||
# add all the other container ports
|
||||
# when generating ClusterIP services (default false)
|
||||
portDiscovery: false
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
# remove the mesh gateway if the public host is
|
||||
# shared across multiple virtual services
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.istio.weavedx.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
# HTTP match conditions (optional)
|
||||
match:
|
||||
- uri:
|
||||
@@ -34,13 +50,8 @@ spec:
|
||||
# HTTP rewrite (optional)
|
||||
rewrite:
|
||||
uri: /
|
||||
# Envoy timeout and retry policy (optional)
|
||||
headers:
|
||||
request:
|
||||
add:
|
||||
x-envoy-upstream-rq-timeout-ms: "15000"
|
||||
x-envoy-max-retries: "10"
|
||||
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
|
||||
# HTTP timeout (optional)
|
||||
timeout: 30s
|
||||
# promote the canary without analysing it (default false)
|
||||
skipAnalysis: false
|
||||
canaryAnalysis:
|
||||
@@ -54,7 +65,7 @@ spec:
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Istio Prometheus checks
|
||||
# Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
@@ -73,5 +84,5 @@ spec:
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
logCmdOutput: "true"
|
||||
|
||||
@@ -25,7 +25,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
image: quay.io/stefanprodan/podinfo:1.7.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
|
||||
@@ -5,17 +5,17 @@ metadata:
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: regexp:^1.4.*
|
||||
flux.weave.works/tag.chart-image: regexp:^1.7.*
|
||||
spec:
|
||||
releaseName: backend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.0.0
|
||||
version: 2.2.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.4.0
|
||||
tag: 1.7.0
|
||||
httpServer:
|
||||
timeout: 30s
|
||||
canary:
|
||||
|
||||
@@ -5,17 +5,17 @@ metadata:
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: semver:~1.4
|
||||
flux.weave.works/tag.chart-image: semver:~1.7
|
||||
spec:
|
||||
releaseName: frontend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.0.0
|
||||
version: 2.2.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.4.0
|
||||
tag: 1.7.0
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
canary:
|
||||
enabled: true
|
||||
|
||||
@@ -11,8 +11,8 @@ spec:
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: loadtester
|
||||
version: 0.1.0
|
||||
version: 0.6.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/flagger-loadtester
|
||||
tag: 0.1.0
|
||||
repository: weaveworks/flagger-loadtester
|
||||
tag: 0.6.1
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.iowa.weavedx.com
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Istio Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
@@ -1,16 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: podinfo-config-env
|
||||
namespace: test
|
||||
data:
|
||||
color: blue
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: podinfo-config-vol
|
||||
namespace: test
|
||||
data:
|
||||
output: console
|
||||
textmode: "true"
|
||||
@@ -1,89 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.3.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
- --random-delay=false
|
||||
- --random-error=false
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: podinfo-config-env
|
||||
key: color
|
||||
- name: SECRET_USER
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: podinfo-secret-env
|
||||
key: user
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: configs
|
||||
mountPath: /etc/podinfo/configs
|
||||
readOnly: true
|
||||
- name: secrets
|
||||
mountPath: /etc/podinfo/secrets
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: configs
|
||||
configMap:
|
||||
name: podinfo-config-vol
|
||||
- name: secrets
|
||||
secret:
|
||||
secretName: podinfo-secret-vol
|
||||
@@ -1,16 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: podinfo-secret-env
|
||||
namespace: test
|
||||
data:
|
||||
password: cGFzc3dvcmQ=
|
||||
user: YWRtaW4=
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: podinfo-secret-vol
|
||||
namespace: test
|
||||
data:
|
||||
key: cGFzc3dvcmQ=
|
||||
@@ -48,6 +48,8 @@ rules:
|
||||
resources:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
- destinationrules
|
||||
- destinationrules/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- appmesh.k8s.aws
|
||||
@@ -59,6 +61,26 @@ rules:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- split.smi-spec.io
|
||||
resources:
|
||||
- trafficsplits
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gloo.solo.io
|
||||
resources:
|
||||
- settings
|
||||
- upstreams
|
||||
- upstreamgroups
|
||||
- proxies
|
||||
- virtualservices
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gateway.solo.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- gateways
|
||||
verbs: ["*"]
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
|
||||
@@ -45,6 +45,8 @@ spec:
|
||||
- service
|
||||
- canaryAnalysis
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
progressDeadlineSeconds:
|
||||
type: number
|
||||
targetRef:
|
||||
@@ -89,6 +91,8 @@ spec:
|
||||
type: number
|
||||
portName:
|
||||
type: string
|
||||
portDiscovery:
|
||||
type: boolean
|
||||
meshName:
|
||||
type: string
|
||||
timeout:
|
||||
@@ -98,17 +102,23 @@ spec:
|
||||
canaryAnalysis:
|
||||
properties:
|
||||
interval:
|
||||
description: Canary schedule interval
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
iterations:
|
||||
description: Number of checks to run for A/B Testing and Blue/Green
|
||||
type: number
|
||||
threshold:
|
||||
description: Max number of failed checks before rollback
|
||||
type: number
|
||||
maxWeight:
|
||||
description: Max traffic percentage routed to canary
|
||||
type: number
|
||||
stepWeight:
|
||||
description: Canary incremental traffic percentage step
|
||||
type: number
|
||||
metrics:
|
||||
description: Prometheus query list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
@@ -116,15 +126,20 @@ spec:
|
||||
required: ['name', 'threshold']
|
||||
properties:
|
||||
name:
|
||||
description: Name of the Prometheus metric
|
||||
type: string
|
||||
interval:
|
||||
description: Interval of the promql query
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
threshold:
|
||||
description: Max scalar value accepted for this metric
|
||||
type: number
|
||||
query:
|
||||
description: Prometheus query
|
||||
type: string
|
||||
webhooks:
|
||||
description: Webhook list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
@@ -132,8 +147,10 @@ spec:
|
||||
required: ['name', 'url', 'timeout']
|
||||
properties:
|
||||
name:
|
||||
description: Name of the webhook
|
||||
type: string
|
||||
type:
|
||||
description: Type of the webhook pre, post or during rollout
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
@@ -141,28 +158,68 @@ spec:
|
||||
- rollout
|
||||
- post-rollout
|
||||
url:
|
||||
description: URL address of this webhook
|
||||
type: string
|
||||
format: url
|
||||
timeout:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
status:
|
||||
properties:
|
||||
phase:
|
||||
description: Analysis phase of this canary
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- Initializing
|
||||
- Initialized
|
||||
- Waiting
|
||||
- Progressing
|
||||
- Finalising
|
||||
- Succeeded
|
||||
- Failed
|
||||
canaryWeight:
|
||||
description: Traffic weight percentage routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ['type', 'status', 'reason']
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: Message associated with this condition
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the current status of this condition
|
||||
type: string
|
||||
status:
|
||||
description: Status of this condition
|
||||
type: string
|
||||
type:
|
||||
description: Type of this condition
|
||||
type: string
|
||||
|
||||
@@ -22,7 +22,7 @@ spec:
|
||||
serviceAccountName: flagger
|
||||
containers:
|
||||
- name: flagger
|
||||
image: weaveworks/flagger:0.13.2
|
||||
image: weaveworks/flagger:0.18.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
@@ -1,49 +1,4 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
---
|
||||
# Source: istio/charts/prometheus/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
@@ -51,6 +6,9 @@ metadata:
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
@@ -363,6 +321,70 @@ data:
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus-istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/clusterrolebindings.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus-istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus-istio-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/service.yaml
|
||||
@@ -384,13 +406,18 @@ spec:
|
||||
port: 9090
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
|
||||
# Source: istio/charts/prometheus/templates/deployment.yaml
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
@@ -407,7 +434,7 @@ spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.7.1"
|
||||
image: "docker.io/prom/prometheus:v2.3.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=6h'
|
||||
@@ -441,3 +468,367 @@ spec:
|
||||
defaultMode: 420
|
||||
optional: true
|
||||
secretName: istio.default
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- ppc64le
|
||||
- s390x
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- ppc64le
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- s390x
|
||||
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestcount
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: "1"
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestduration
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: response.duration | "0ms"
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestsize
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: request.size | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: responsesize
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: response.size | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: tcpbytesent
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: connection.sent.bytes | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.name | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: tcpbytereceived
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: connection.received.bytes | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.name | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: prometheus
|
||||
metadata:
|
||||
name: handler
|
||||
namespace: istio-system
|
||||
spec:
|
||||
metrics:
|
||||
- name: requests_total
|
||||
instance_name: requestcount.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
- name: request_duration_seconds
|
||||
instance_name: requestduration.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
explicit_buckets:
|
||||
bounds: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
|
||||
- name: request_bytes
|
||||
instance_name: requestsize.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
exponentialBuckets:
|
||||
numFiniteBuckets: 8
|
||||
scale: 1
|
||||
growthFactor: 10
|
||||
- name: response_bytes
|
||||
instance_name: responsesize.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
exponentialBuckets:
|
||||
numFiniteBuckets: 8
|
||||
scale: 1
|
||||
growthFactor: 10
|
||||
- name: tcp_sent_bytes_total
|
||||
instance_name: tcpbytesent.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- connection_security_policy
|
||||
- name: tcp_received_bytes_total
|
||||
instance_name: tcpbytereceived.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- connection_security_policy
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: rule
|
||||
metadata:
|
||||
name: promhttp
|
||||
namespace: istio-system
|
||||
spec:
|
||||
match: context.protocol == "http" || context.protocol == "grpc"
|
||||
actions:
|
||||
- handler: handler.prometheus
|
||||
instances:
|
||||
- requestcount.metric
|
||||
- requestduration.metric
|
||||
- requestsize.metric
|
||||
- responsesize.metric
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: rule
|
||||
metadata:
|
||||
name: promtcp
|
||||
namespace: istio-system
|
||||
spec:
|
||||
match: context.protocol == "tcp"
|
||||
actions:
|
||||
- handler: handler.prometheus
|
||||
instances:
|
||||
- tcpbytesent.metric
|
||||
- tcpbytereceived.metric
|
||||
---
|
||||
artifacts/gloo/canary.yaml (new file, +36)
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    port: 9898
  canaryAnalysis:
    interval: 10s
    threshold: 10
    maxWeight: 50
    stepWeight: 5
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: request-duration
      threshold: 500
      interval: 30s
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 1m -q 10 -c 2 http://gloo.example.com/"
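A quick way to exercise the manifest above is with plain kubectl; this is only a sketch and assumes the Flagger CRDs and the `test` namespace already exist:

```console
$ kubectl apply -f ./artifacts/gloo/canary.yaml
$ kubectl -n test get canary podinfo
$ kubectl -n test describe canary/podinfo
```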
@@ -6,7 +6,9 @@ metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
replicas: 1
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
@@ -23,7 +25,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: quay.io/stefanprodan/podinfo:1.4.0
|
||||
image: quay.io/stefanprodan/podinfo:1.7.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
@@ -37,7 +39,7 @@ spec:
|
||||
- --random-error=false
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
value: green
|
||||
value: blue
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
@@ -45,10 +47,8 @@ spec:
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
failureThreshold: 3
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 2
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
@@ -56,14 +56,12 @@ spec:
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
failureThreshold: 3
|
||||
periodSeconds: 3
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 2
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 256Mi
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 16Mi
|
||||
memory: 64Mi
|
||||
artifacts/gloo/virtual-service.yaml (new file, +17)
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
spec:
  virtualHost:
    domains:
    - '*'
    name: podinfo.default
    routes:
    - matcher:
        prefix: /
      routeAction:
        upstreamGroup:
          name: podinfo
          namespace: gloo
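With the canary running, the Gloo side of the routing can be inspected through the resources referenced above; a sketch, assuming the `gateway.solo.io` and `gloo.solo.io` CRDs listed in the RBAC rules later in this diff are installed:

```console
$ kubectl -n gloo get upstreamgroups
$ kubectl -n test get virtualservices.gateway.solo.io podinfo -o yaml
```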
58
artifacts/helmtester/deployment.yaml
Normal file
@@ -0,0 +1,58 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger-helmtester
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: flagger-helmtester
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
serviceAccountName: tiller
|
||||
containers:
|
||||
- name: helmtester
|
||||
image: weaveworks/flagger-loadtester:0.4.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./loadtester
|
||||
- -port=8080
|
||||
- -log-level=info
|
||||
- -timeout=1h
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
16
artifacts/helmtester/service.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: flagger-helmtester
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: flagger-helmtester
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -17,7 +17,7 @@ spec:
    spec:
      containers:
        - name: loadtester
          image: weaveworks/flagger-loadtester:0.3.0
          image: weaveworks/flagger-loadtester:0.6.1
          imagePullPolicy: IfNotPresent
          ports:
            - name: http

@@ -23,7 +23,7 @@ spec:
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.4.0
        image: quay.io/stefanprodan/podinfo:1.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
@@ -1,45 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- podinfo.istio.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: podinfo
|
||||
subset: primary
|
||||
weight: 50
|
||||
- destination:
|
||||
host: podinfo
|
||||
subset: canary
|
||||
weight: 50
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: podinfo-destination
|
||||
namespace: test
|
||||
spec:
|
||||
host: podinfo
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
consistentHash:
|
||||
httpCookie:
|
||||
name: istiouser
|
||||
ttl: 30s
|
||||
subsets:
|
||||
- name: primary
|
||||
labels:
|
||||
app: podinfo
|
||||
role: primary
|
||||
- name: canary
|
||||
labels:
|
||||
app: podinfo
|
||||
role: canary
|
||||
@@ -1,43 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- app.istio.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- match:
|
||||
- headers:
|
||||
user-agent:
|
||||
regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
|
||||
uri:
|
||||
prefix: "/version/"
|
||||
rewrite:
|
||||
uri: /api/info
|
||||
route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 0
|
||||
- destination:
|
||||
host: podinfo
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
- match:
|
||||
- uri:
|
||||
prefix: "/version/"
|
||||
rewrite:
|
||||
uri: /api/info
|
||||
route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
@@ -1,25 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- podinfo.iowa.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
mirror:
|
||||
host: podinfo
|
||||
port:
|
||||
number: 9898
|
||||
@@ -1,26 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- podinfo.iowa.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
- destination:
|
||||
host: podinfo
|
||||
port:
|
||||
number: 9898
|
||||
weight: 0
|
||||
131
artifacts/smi/istio-adapter.yaml
Normal file
@@ -0,0 +1,131 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: trafficsplits.split.smi-spec.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .spec.service
|
||||
description: The service
|
||||
name: Service
|
||||
type: string
|
||||
group: split.smi-spec.io
|
||||
names:
|
||||
kind: TrafficSplit
|
||||
listKind: TrafficSplitList
|
||||
plural: trafficsplits
|
||||
singular: trafficsplit
|
||||
scope: Namespaced
|
||||
subresources:
|
||||
status: {}
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: smi-adapter-istio
|
||||
namespace: istio-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: smi-adapter-istio
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- services
|
||||
- endpoints
|
||||
- persistentvolumeclaims
|
||||
- events
|
||||
- configmaps
|
||||
- secrets
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- replicasets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- monitoring.coreos.com
|
||||
resources:
|
||||
- servicemonitors
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- apps
|
||||
resourceNames:
|
||||
- smi-adapter-istio
|
||||
resources:
|
||||
- deployments/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- split.smi-spec.io
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- networking.istio.io
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: smi-adapter-istio
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: smi-adapter-istio
|
||||
namespace: istio-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: smi-adapter-istio
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: smi-adapter-istio
|
||||
namespace: istio-system
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: smi-adapter-istio
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: smi-adapter-istio
|
||||
annotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
spec:
|
||||
serviceAccountName: smi-adapter-istio
|
||||
containers:
|
||||
- name: smi-adapter-istio
|
||||
image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
|
||||
command:
|
||||
- smi-adapter-istio
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: WATCH_NAMESPACE
|
||||
value: ""
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: OPERATOR_NAME
|
||||
value: "smi-adapter-istio"
|
||||
@@ -1,23 +0,0 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 2
|
||||
maxReplicas: 3
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageValue: 200Mi
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-canary
|
||||
namespace: test
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -1,16 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-primary
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo-primary
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo-primary
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo-primary
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -1,30 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- podinfo.istio.weavedx.com
|
||||
- podinfo
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
- destination:
|
||||
host: podinfo
|
||||
port:
|
||||
number: 9898
|
||||
weight: 0
|
||||
timeout: 10s
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 2s
|
||||
@@ -1,10 +1,10 @@
apiVersion: v1
name: flagger
version: 0.13.2
appVersion: 0.13.2
version: 0.18.0
appVersion: 0.18.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
@@ -17,4 +17,5 @@ keywords:
- canary
- istio
- appmesh
- linkerd
- gitops
@@ -1,15 +1,14 @@
# Flagger

[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
canary deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack or MS Teams.
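The control loop described above is configured per target through a Canary resource; the snippet below is only a sketch of the analysis settings that map to that behaviour (interval, step weight and the KPI thresholds), with values chosen for illustration:

```yaml
# illustrative canaryAnalysis settings, not taken from a specific file in this diff
canaryAnalysis:
  interval: 1m        # how often the analysis runs
  threshold: 5        # max failed checks before the canary is rolled back
  maxWeight: 50       # max traffic percentage routed to the canary
  stepWeight: 5       # traffic increase per successful iteration
  metrics:
  - name: request-success-rate   # KPI: HTTP request success rate
    threshold: 99
    interval: 1m
  - name: request-duration       # KPI: request duration in milliseconds
    threshold: 500
    interval: 1m
```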
## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

## Installing the Chart
@@ -17,16 +16,35 @@ Based on the KPIs analysis a canary is promoted or aborted and the analysis resu
Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
$ helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger`:
Install Flagger's custom resource definitions:

```console
$ helm install --name flagger --namespace istio-system flagger/flagger
$ kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```

To install the chart with the release name `flagger` for Istio:

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set crd.create=false \
  --set meshProvider=istio \
  --set metricsServer=http://prometheus:9090
```

To install the chart with the release name `flagger` for Linkerd:

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace=linkerd \
  --set crd.create=false \
  --set meshProvider=linkerd \
  --set metricsServer=http://linkerd-prometheus:9090
```

The command deploys Flagger on the Kubernetes cluster in the istio-system namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart
@@ -48,11 +66,16 @@ Parameter | Description | Default
`image.repository` | image repository | `weaveworks/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
`prometheus.install` | if `true`, installs Prometheus configured to scrape all pods in the cluster including the App Mesh sidecar | `false`
`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`msteams.url` | Microsoft Teams incoming webhook | None
`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
`leaderElection.replicaCount` | number of replicas | `1`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`crd.create` | if `true`, create Flagger's CRDs | `true`
`resources.requests/cpu` | pod CPU request | `10m`
`resources.requests/memory` | pod memory request | `32Mi`
@@ -67,6 +90,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace istio-system \
  --set crd.create=false \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general
```
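Given the new `leaderElection.enabled` and `leaderElection.replicaCount` parameters documented in the table above, a highly available install would look roughly like the following; this is a sketch built from those parameters, not a command taken from the diff:

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace istio-system \
  --set crd.create=false \
  --set leaderElection.enabled=true \
  --set leaderElection.replicaCount=2
```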
@@ -46,6 +46,8 @@ spec:
|
||||
- service
|
||||
- canaryAnalysis
|
||||
properties:
|
||||
provider:
|
||||
type: string
|
||||
progressDeadlineSeconds:
|
||||
type: number
|
||||
targetRef:
|
||||
@@ -90,6 +92,8 @@ spec:
|
||||
type: number
|
||||
portName:
|
||||
type: string
|
||||
portDiscovery:
|
||||
type: boolean
|
||||
meshName:
|
||||
type: string
|
||||
timeout:
|
||||
@@ -99,17 +103,23 @@ spec:
|
||||
canaryAnalysis:
|
||||
properties:
|
||||
interval:
|
||||
description: Canary schedule interval
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
iterations:
|
||||
description: Number of checks to run for A/B Testing and Blue/Green
|
||||
type: number
|
||||
threshold:
|
||||
description: Max number of failed checks before rollback
|
||||
type: number
|
||||
maxWeight:
|
||||
description: Max traffic percentage routed to canary
|
||||
type: number
|
||||
stepWeight:
|
||||
description: Canary incremental traffic percentage step
|
||||
type: number
|
||||
metrics:
|
||||
description: Prometheus query list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
@@ -117,15 +127,20 @@ spec:
|
||||
required: ['name', 'threshold']
|
||||
properties:
|
||||
name:
|
||||
description: Name of the Prometheus metric
|
||||
type: string
|
||||
interval:
|
||||
description: Interval of the promql query
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
threshold:
|
||||
description: Max scalar value accepted for this metric
|
||||
type: number
|
||||
query:
|
||||
description: Prometheus query
|
||||
type: string
|
||||
webhooks:
|
||||
description: Webhook list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
@@ -133,8 +148,10 @@ spec:
|
||||
required: ['name', 'url', 'timeout']
|
||||
properties:
|
||||
name:
|
||||
description: Name of the webhook
|
||||
type: string
|
||||
type:
|
||||
description: Type of the webhook pre, post or during rollout
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
@@ -142,29 +159,69 @@ spec:
|
||||
- rollout
|
||||
- post-rollout
|
||||
url:
|
||||
description: URL address of this webhook
|
||||
type: string
|
||||
format: url
|
||||
timeout:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
status:
|
||||
properties:
|
||||
phase:
|
||||
description: Analysis phase of this canary
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- Initializing
|
||||
- Initialized
|
||||
- Waiting
|
||||
- Progressing
|
||||
- Finalising
|
||||
- Succeeded
|
||||
- Failed
|
||||
canaryWeight:
|
||||
description: Traffic weight percentage routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ['type', 'status', 'reason']
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: Message associated with this condition
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the current status of this condition
|
||||
type: string
|
||||
status:
|
||||
description: Status of this condition
|
||||
type: string
|
||||
type:
|
||||
description: Type of this condition
|
||||
type: string
|
||||
{{- end }}
|
||||
|
||||
@@ -8,7 +8,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: 1
|
||||
replicas: {{ .Values.leaderElection.replicaCount }}
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
@@ -22,6 +22,16 @@ spec:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- name: flagger
|
||||
securityContext:
|
||||
@@ -51,6 +61,13 @@ spec:
|
||||
- -slack-user={{ .Values.slack.user }}
|
||||
- -slack-channel={{ .Values.slack.channel }}
|
||||
{{- end }}
|
||||
{{- if .Values.msteams.url }}
|
||||
- -msteams-url={{ .Values.msteams.url }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- -enable-leader-election=true
|
||||
- -leader-election-namespace={{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
@@ -75,10 +92,6 @@ spec:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
@@ -238,10 +238,10 @@ spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
      containers:
        - name: prometheus
          image: "docker.io/prom/prometheus:v2.7.1"
          image: "docker.io/prom/prometheus:v2.10.0"
          imagePullPolicy: IfNotPresent
          args:
            - '--storage.tsdb.retention=6h'
            - '--storage.tsdb.retention=2h'
            - '--config.file=/etc/prometheus/prometheus.yml'
          ports:
            - containerPort: 9090
66
charts/flagger/templates/psp.yaml
Normal file
@@ -0,0 +1,66 @@
|
||||
{{- if .Values.rbac.pspEnabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
|
||||
spec:
|
||||
privileged: false
|
||||
hostIPC: false
|
||||
hostNetwork: false
|
||||
hostPID: false
|
||||
readOnlyRootFilesystem: false
|
||||
allowPrivilegeEscalation: false
|
||||
allowedCapabilities:
|
||||
- '*'
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- '*'
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames:
|
||||
- {{ template "flagger.fullname" . }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -44,6 +44,8 @@ rules:
|
||||
resources:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
- destinationrules
|
||||
- destinationrules/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- appmesh.k8s.aws
|
||||
@@ -55,6 +57,26 @@ rules:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- split.smi-spec.io
|
||||
resources:
|
||||
- trafficsplits
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gloo.solo.io
|
||||
resources:
|
||||
- settings
|
||||
- upstreams
|
||||
- upstreamgroups
|
||||
- proxies
|
||||
- virtualservices
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gateway.solo.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- gateways
|
||||
verbs: ["*"]
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
@@ -2,7 +2,7 @@

image:
  repository: weaveworks/flagger
  tag: 0.13.2
  tag: 0.18.0
  pullPolicy: IfNotPresent

metricsServer: "http://prometheus:9090"
@@ -19,6 +19,14 @@ slack:
  # incoming webhook https://api.slack.com/incoming-webhooks
  url:

msteams:
  # MS Teams incoming webhook URL
  url:

leaderElection:
  enabled: false
  replicaCount: 1

serviceAccount:
  # serviceAccount.create: Whether to create a service account or not
  create: true
@@ -28,6 +36,8 @@ serviceAccount:
rbac:
  # rbac.create: `true` if rbac resources should be created
  create: true
  # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
  pspEnabled: false

crd:
  # crd.create: `true` if custom resource definitions should be created
@@ -48,8 +58,6 @@ nodeSelector: {}

tolerations: []

affinity: {}

prometheus:
  # to be used with AppMesh or nginx ingress
  install: false
@@ -1,7 +1,7 @@
apiVersion: v1
name: grafana
version: 1.2.0
appVersion: 5.4.3
version: 1.3.0
appVersion: 6.2.5
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app

@@ -6,7 +6,7 @@ replicaCount: 1

image:
  repository: grafana/grafana
  tag: 5.4.3
  tag: 6.2.5
  pullPolicy: IfNotPresent

service:

@@ -1,10 +1,10 @@
apiVersion: v1
name: loadtester
version: 0.4.0
appVersion: 0.3.0
version: 0.6.0
appVersion: 0.6.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing services based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
@@ -7,7 +7,6 @@ and can be used to generates traffic during canary analysis when configured as a
## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0

## Installing the Chart

@@ -25,7 +24,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester

The command deploys Grafana on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should have the Istio sidecar injection enabled
> **Tip**: Note that the namespace where you deploy the load tester should have the Istio or App Mesh sidecar injection enabled

The [configuration](#configuration) section lists the parameters that can be configured during installation.

@@ -48,13 +47,14 @@ Parameter | Description | Default
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`replicaCount` | Desired number of pods | `1`
`serviceAccountName` | Kubernetes service account name | `none`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | memory requests | `64Mi`
`resources.requests.memory` | Memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
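For context, the load tester is driven through Canary webhooks; the sketch below follows the shape of the `artifacts/gloo/canary.yaml` entry earlier in this diff, with the target URL chosen as a placeholder:

```yaml
webhooks:
- name: load-test
  url: http://flagger-loadtester.test/
  timeout: 5s
  metadata:
    type: cmd
    cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```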
@@ -19,6 +19,9 @@ spec:
      annotations:
        appmesh.k8s.aws/ports: "444"
    spec:
      {{- if .Values.serviceAccountName }}
      serviceAccountName: {{ .Values.serviceAccountName }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"

@@ -2,7 +2,7 @@ replicaCount: 1

image:
  repository: weaveworks/flagger-loadtester
  tag: 0.3.0
  tag: 0.6.1
  pullPolicy: IfNotPresent

logLevel: info
@@ -27,6 +27,8 @@ tolerations: []

affinity: {}

serviceAccountName: ""

# App Mesh virtual node settings
meshName: ""
#backends:
@@ -1,6 +1,6 @@
apiVersion: v1
version: 2.0.1
appVersion: 1.4.0
version: 2.3.0
appVersion: 1.7.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
@@ -26,6 +26,9 @@ spec:
|
||||
hosts:
|
||||
- {{ .Values.canary.istioIngress.host }}
|
||||
{{- end }}
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: {{ .Values.canary.istioTLS }}
|
||||
canaryAnalysis:
|
||||
interval: {{ .Values.canary.analysis.interval }}
|
||||
threshold: {{ .Values.canary.analysis.threshold }}
|
||||
@@ -38,8 +41,17 @@ spec:
|
||||
- name: request-duration
|
||||
threshold: {{ .Values.canary.thresholds.latency }}
|
||||
interval: 1m
|
||||
{{- if .Values.canary.loadtest.enabled }}
|
||||
webhooks:
|
||||
{{- if .Values.canary.helmtest.enabled }}
|
||||
- name: "helm test"
|
||||
type: pre-rollout
|
||||
url: {{ .Values.canary.helmtest.url }}
|
||||
timeout: 3m
|
||||
metadata:
|
||||
type: "helm"
|
||||
cmd: "test {{ .Release.Name }} --cleanup"
|
||||
{{- end }}
|
||||
{{- if .Values.canary.loadtest.enabled }}
|
||||
- name: load-test-get
|
||||
url: {{ .Values.canary.loadtest.url }}
|
||||
timeout: 5s
|
||||
@@ -50,5 +62,5 @@ spec:
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 2 -m POST -d '{\"test\": true}' http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}/echo"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,7 +1,7 @@
# Default values for podinfo.
image:
  repository: quay.io/stefanprodan/podinfo
  tag: 1.4.0
  tag: 1.7.0
  pullPolicy: IfNotPresent

service:
@@ -17,6 +17,8 @@ hpa:

canary:
  enabled: true
  # Istio traffic policy tls can be DISABLE or ISTIO_MUTUAL
  istioTLS: DISABLE
  istioIngress:
    enabled: false
    # Istio ingress gateway name
@@ -45,6 +47,10 @@ canary:
    enabled: false
    # load tester address
    url: http://flagger-loadtester.test/
  helmtest:
    enabled: false
    # helm tester address
    url: http://flagger-helmtester.kube-system/

resources:
  limits:
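To exercise the new helm test hook together with the existing load test, the demo chart could plausibly be installed with the flags below; this is a sketch built from the values keys above, with the release name and chart path chosen for illustration:

```console
$ helm upgrade -i podinfo ./charts/podinfo \
  --namespace test \
  --set canary.loadtest.enabled=true \
  --set canary.helmtest.enabled=true \
  --set canary.helmtest.url=http://flagger-helmtester.kube-system/
```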
@@ -1,12 +1,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "github.com/stefanprodan/klog"
|
||||
"github.com/Masterminds/semver"
|
||||
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
|
||||
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
|
||||
"github.com/weaveworks/flagger/pkg/controller"
|
||||
@@ -18,51 +21,69 @@ import (
|
||||
"github.com/weaveworks/flagger/pkg/signals"
|
||||
"github.com/weaveworks/flagger/pkg/version"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
"k8s.io/client-go/transport"
|
||||
_ "k8s.io/code-generator/cmd/client-gen/generators"
|
||||
)
|
||||
|
||||
var (
|
||||
masterURL string
|
||||
kubeconfig string
|
||||
metricsServer string
|
||||
controlLoopInterval time.Duration
|
||||
logLevel string
|
||||
port string
|
||||
slackURL string
|
||||
slackUser string
|
||||
slackChannel string
|
||||
threadiness int
|
||||
zapReplaceGlobals bool
|
||||
zapEncoding string
|
||||
namespace string
|
||||
meshProvider string
|
||||
selectorLabels string
|
||||
masterURL string
|
||||
kubeconfig string
|
||||
metricsServer string
|
||||
controlLoopInterval time.Duration
|
||||
logLevel string
|
||||
port string
|
||||
msteamsURL string
|
||||
slackURL string
|
||||
slackUser string
|
||||
slackChannel string
|
||||
threadiness int
|
||||
zapReplaceGlobals bool
|
||||
zapEncoding string
|
||||
namespace string
|
||||
meshProvider string
|
||||
selectorLabels string
|
||||
enableLeaderElection bool
|
||||
leaderElectionNamespace string
|
||||
ver bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
|
||||
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
|
||||
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
|
||||
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "8080", "Port to listen on.")
|
||||
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
|
||||
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
|
||||
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
|
||||
flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
|
||||
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio or appmesh")
|
||||
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors")
|
||||
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, supergloo, nginx or smi.")
|
||||
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
|
||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
|
||||
flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
|
||||
flag.BoolVar(&ver, "version", false, "Print version")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if ver {
|
||||
fmt.Println("Flagger version", version.VERSION, "revision ", version.REVISION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating logger: %v", err)
|
||||
@@ -87,12 +108,12 @@ func main() {
|
||||
|
||||
meshClient, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building istio clientset: %v", err)
|
||||
logger.Fatalf("Error building mesh clientset: %v", err)
|
||||
}
|
||||
|
||||
flaggerClient, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building example clientset: %s", err.Error())
|
||||
logger.Fatalf("Error building flagger clientset: %s", err.Error())
|
||||
}
|
||||
|
||||
flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
|
||||
@@ -106,6 +127,26 @@ func main() {
|
||||
logger.Fatalf("Error calling Kubernetes API: %v", err)
|
||||
}
|
||||
|
||||
k8sVersionConstraint := "^1.11.0"
|
||||
|
||||
// We append -alpha.1 to the end of our version constraint so that prebuilds of later versions
|
||||
// are considered valid for our purposes, as well as some managed solutions like EKS where they provide
|
||||
// a version like `v1.12.6-eks-d69f1b`. It doesn't matter what the prelease value is here, just that it
|
||||
// exists in our constraint.
|
||||
semverConstraint, err := semver.NewConstraint(k8sVersionConstraint + "-alpha.1")
|
||||
if err != nil {
|
||||
logger.Fatalf("Error parsing kubernetes version constraint: %v", err)
|
||||
}
|
||||
|
||||
k8sSemver, err := semver.NewVersion(ver.GitVersion)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error parsing kubernetes version as a semantic version: %v", err)
|
||||
}
|
||||
|
||||
if !semverConstraint.Check(k8sSemver) {
|
||||
logger.Fatalf("Unsupported version of kubernetes detected. Expected %s, got %v", k8sVersionConstraint, ver)
|
||||
}
|
||||
|
||||
labels := strings.Split(selectorLabels, ",")
|
||||
if len(labels) < 1 {
|
||||
logger.Fatalf("At least one selector label is required")
|
||||
@@ -116,22 +157,20 @@ func main() {
|
||||
logger.Infof("Watching namespace %s", namespace)
|
||||
}
|
||||
|
||||
ok, err := metrics.CheckMetricsServer(metricsServer)
|
||||
observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building prometheus client: %s", err.Error())
|
||||
}
|
||||
|
||||
ok, err := observerFactory.Client.IsOnline()
|
||||
if ok {
|
||||
logger.Infof("Connected to metrics server %s", metricsServer)
|
||||
} else {
|
||||
logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
|
||||
}
|
||||
|
||||
var slack *notifier.Slack
|
||||
if slackURL != "" {
|
||||
slack, err = notifier.NewSlack(slackURL, slackUser, slackChannel)
|
||||
if err != nil {
|
||||
logger.Errorf("Notifier %v", err)
|
||||
} else {
|
||||
logger.Infof("Slack notifications enabled for channel %s", slack.Channel)
|
||||
}
|
||||
}
|
||||
// setup Slack or MS Teams notifications
|
||||
notifierClient := initNotifier(logger)
|
||||
|
||||
// start HTTP server
|
||||
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
|
||||
@@ -144,10 +183,10 @@ func main() {
|
||||
flaggerClient,
|
||||
canaryInformer,
|
||||
controlLoopInterval,
|
||||
metricsServer,
|
||||
logger,
|
||||
slack,
|
||||
notifierClient,
|
||||
routerFactory,
|
||||
observerFactory,
|
||||
meshProvider,
|
||||
version.VERSION,
|
||||
labels,
|
||||
@@ -164,12 +203,102 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
// start controller
|
||||
go func(ctrl *controller.Controller) {
|
||||
if err := ctrl.Run(threadiness, stopCh); err != nil {
|
||||
// leader election context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// prevents new requests when leadership is lost
|
||||
cfg.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))
|
||||
|
||||
// cancel leader election context on shutdown signals
|
||||
go func() {
|
||||
<-stopCh
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// wrap controller run
|
||||
runController := func() {
|
||||
if err := c.Run(threadiness, stopCh); err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
}(c)
|
||||
}
|
||||
|
||||
<-stopCh
|
||||
// run controller when this instance wins the leader election
|
||||
if enableLeaderElection {
|
||||
ns := leaderElectionNamespace
|
||||
if namespace != "" {
|
||||
ns = namespace
|
||||
}
|
||||
startLeaderElection(ctx, runController, ns, kubeClient, logger)
|
||||
} else {
|
||||
runController()
|
||||
}
|
||||
}
|
||||
|
||||
func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient kubernetes.Interface, logger *zap.SugaredLogger) {
|
||||
configMapName := "flagger-leader-election"
|
||||
id, err := os.Hostname()
|
||||
if err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
id = id + "_" + string(uuid.NewUUID())
|
||||
|
||||
lock, err := resourcelock.New(
|
||||
resourcelock.ConfigMapsResourceLock,
|
||||
ns,
|
||||
configMapName,
|
||||
kubeClient.CoreV1(),
|
||||
kubeClient.CoordinationV1(),
|
||||
resourcelock.ResourceLockConfig{
|
||||
Identity: id,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Infof("Starting leader election id: %s configmap: %s namespace: %s", id, configMapName, ns)
|
||||
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
|
||||
Lock: lock,
|
||||
ReleaseOnCancel: true,
|
||||
LeaseDuration: 60 * time.Second,
|
||||
RenewDeadline: 15 * time.Second,
|
||||
RetryPeriod: 5 * time.Second,
|
||||
Callbacks: leaderelection.LeaderCallbacks{
|
||||
OnStartedLeading: func(ctx context.Context) {
|
||||
logger.Info("Acting as elected leader")
|
||||
run()
|
||||
},
|
||||
OnStoppedLeading: func() {
|
||||
logger.Infof("Leadership lost")
|
||||
os.Exit(1)
|
||||
},
|
||||
OnNewLeader: func(identity string) {
|
||||
if identity != id {
|
||||
logger.Infof("Another instance has been elected as leader: %v", identity)
|
||||
}
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
provider := "slack"
|
||||
notifierURL := slackURL
|
||||
if msteamsURL != "" {
|
||||
provider = "msteams"
|
||||
notifierURL = msteamsURL
|
||||
}
|
||||
notifierFactory := notifier.NewFactory(notifierURL, slackUser, slackChannel)
|
||||
|
||||
if notifierURL != "" {
|
||||
var err error
|
||||
client, err = notifierFactory.Notifier(provider)
|
||||
if err != nil {
|
||||
logger.Errorf("Notifier %v", err)
|
||||
} else {
|
||||
logger.Infof("Notifications enabled for %s", notifierURL[0:30])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var VERSION = "0.3.0"
|
||||
var VERSION = "0.6.1"
|
||||
var (
|
||||
logLevel string
|
||||
port string
|
||||
@@ -47,5 +47,7 @@ func main() {
|
||||
go taskRunner.Start(100*time.Millisecond, stopCh)
|
||||
|
||||
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
|
||||
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
|
||||
|
||||
gateStorage := loadtester.NewGateStorage("in-memory")
|
||||
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
|
||||
}
|
||||
|
||||
BIN
docs/diagrams/flagger-bluegreen-steps.png
Normal file
|
After Width: | Height: | Size: 31 KiB |
|
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 49 KiB |
|
Before Width: | Height: | Size: 210 KiB After Width: | Height: | Size: 36 KiB |
BIN
docs/diagrams/flagger-gloo-overview.png
Normal file
|
After Width: | Height: | Size: 41 KiB |
BIN
docs/diagrams/flagger-linkerd-traffic-split.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
BIN
docs/diagrams/flagger-nginx-linkerd.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
|
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 49 KiB |
@@ -5,13 +5,12 @@ description: Flagger is a progressive delivery Kubernetes operator
|
||||
# Introduction
|
||||
|
||||
[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
|
||||
deployments using **Istio**, **App Mesh** or **NGINX** routing for traffic shifting and **Prometheus** metrics for canary analysis.
|
||||
The canary analysis can be extended with webhooks for running
|
||||
system integration/acceptance tests, load tests, or any other custom validation.
|
||||
deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX** or **Gloo** routing for traffic shifting and **Prometheus** metrics for canary analysis.
|
||||
The canary analysis can be extended with webhooks for running system integration/acceptance tests, load tests, or any other custom validation.
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
|
||||
indicators like HTTP requests success rate, requests average duration and pods health.
|
||||
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack**.
|
||||
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack** or **MS Teams**.
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
* [Introduction](README.md)
|
||||
* [How it works](how-it-works.md)
|
||||
* [FAQ](faq.md)
|
||||
|
||||
## Install
|
||||
|
||||
@@ -14,12 +15,16 @@
|
||||
|
||||
* [Istio Canary Deployments](usage/progressive-delivery.md)
|
||||
* [Istio A/B Testing](usage/ab-testing.md)
|
||||
* [Linkerd Canary Deployments](usage/linkerd-progressive-delivery.md)
|
||||
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
|
||||
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
|
||||
* [Gloo Canary Deployments](usage/gloo-progressive-delivery.md)
|
||||
* [Blue/Green Deployments](usage/blue-green.md)
|
||||
* [Monitoring](usage/monitoring.md)
|
||||
* [Alerting](usage/alerting.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
* [SMI Istio Canary Deployments](tutorials/flagger-smi-istio.md)
|
||||
* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
|
||||
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)
|
||||
|
||||
@@ -1,21 +1,398 @@
|
||||
# Frequently asked questions
|
||||
|
||||
**Can Flagger be part of my integration tests?**
|
||||
> Yes, Flagger supports webhooks to do integration testing.
|
||||
### Deployment Strategies
|
||||
|
||||
**What if I only want to target beta testers?**
|
||||
> That's a feature in Flagger, not in App Mesh. It's on the App Mesh roadmap.
|
||||
**Which deployment strategies are supported by Flagger?**
|
||||
|
||||
**When do I use A/B testing instead of a Canary release?**
|
||||
> One advantage of using A/B testing is that each version remains separated and routes aren't mixed.
|
||||
>
|
||||
> Using a Canary deployment can lead to behaviour like this one observed by a
|
||||
> user:
|
||||
>
|
||||
> [..] during a canary deployment of our nodejs app, the version that is being served <50% traffic reports mime type mismatch errors in the browser (js as "text/html")
|
||||
> When the deployment Passes/ Fails (doesn't really matter) the version that stays alive works as expected. If anyone has any tips or direction I would greatly appreciate it. Even if its as simple as I'm looking in the wrong place. Thanks in advance!
|
||||
>
|
||||
> The issue was that we were not maintaining session affinity while serving files for our frontend. Which resulted in any redirects or refreshes occasionally returning a mismatched app.*.js file (generated from vue)
|
||||
>
|
||||
> Read up on [A/B testing](https://docs.flagger.app/usage/ab-testing).
|
||||
Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
|
||||
* Canary (progressive traffic shifting)
|
||||
* Istio, Linkerd, App Mesh, NGINX, Gloo
|
||||
* A/B Testing (HTTP headers and cookies traffic routing)
|
||||
* Istio, NGINX
|
||||
* Blue/Green (traffic switch)
|
||||
* Kubernetes CNI
|
||||
|
||||
For Canary deployments and A/B testing you'll need a Layer 7 traffic management solution like a service mesh or an ingress controller.
|
||||
For Blue/Green deployments no service mesh or ingress controller is required.
|
||||
|
||||
**When should I use A/B testing instead of progressive traffic shifting?**
|
||||
|
||||
For frontend applications that require session affinity you should use HTTP header or cookie match conditions
|
||||
to ensure a set of users will stay on the same version for the whole duration of the canary analysis.
|
||||
A/B testing is supported by Istio and NGINX only.
|
||||
|
||||
Istio example:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# max number of failed iterations before rollback
|
||||
threshold: 2
|
||||
# canary match condition
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
regex: ".*insider.*"
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(canary=always)(;.*)?$"
|
||||
```
|
||||
|
||||
NGINX example:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
interval: 1m
|
||||
threshold: 10
|
||||
iterations: 2
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "insider"
|
||||
- headers:
|
||||
cookie:
|
||||
exact: "canary"
|
||||
```
|
||||
|
||||
Note that the NGINX ingress controller supports only exact matching for a single header and the cookie value is set to `always`.
|
||||
|
||||
The above configurations will route users with the x-canary header or canary cookie to the canary instance during analysis:
|
||||
|
||||
```bash
|
||||
curl -H 'X-Canary: insider' http://app.example.com
|
||||
curl -b 'canary=always' http://app.example.com
|
||||
```
|
||||
|
||||
**Can I use Flagger to manage applications that live outside of a service mesh?**
|
||||
|
||||
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
|
||||
with Kubernetes L4 networking.
|
||||
|
||||
Blue/Green example:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
spec:
|
||||
provider: kubernetes
|
||||
canaryAnalysis:
|
||||
interval: 30s
|
||||
threshold: 2
|
||||
iterations: 10
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for five minutes (ten iterations at a 30s interval).
|
||||
Flagger starts the load test for the canary service (green version) and checks the Prometheus metrics every 30 seconds.
|
||||
If the analysis result is positive, Flagger will promote the canary (green version) to primary (blue version).
|
||||
|
||||
### Kubernetes services
|
||||
|
||||
**How is an application exposed inside the cluster?**
|
||||
|
||||
Assuming the app name is podinfo you can define a canary like:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
service:
|
||||
# container port (required)
|
||||
port: 9898
|
||||
# port name can be http or grpc (default http)
|
||||
portName: http
|
||||
```
|
||||
|
||||
Based on the canary spec service, Flagger generates the following Kubernetes ClusterIP service:
|
||||
|
||||
* `<targetRef.name>.<namespace>.svc.cluster.local`
|
||||
selector `app=<name>-primary`
|
||||
* `<targetRef.name>-primary.<namespace>.svc.cluster.local`
|
||||
selector `app=<name>-primary`
|
||||
* `<targetRef.name>-canary.<namespace>.svc.cluster.local`
|
||||
selector `app=<name>`
|
||||
|
||||
This ensures that traffic coming from a namespace outside the mesh to `podinfo.test:9898`
|
||||
will be routed to the latest stable release of your app.
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo-primary
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-primary
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo-primary
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo-canary
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: podinfo
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
```
|
||||
|
||||
The `podinfo-canary.test:9898` address is available only during the
|
||||
canary analysis and can be used for conformance testing or load testing.
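For example, a pre-rollout webhook can run a quick conformance check against that address. This is a minimal sketch; the `/healthz` endpoint and the availability of `curl` in the tester image are assumptions:

```yaml
canaryAnalysis:
  webhooks:
    - name: "conformance test"
      type: pre-rollout
      url: http://flagger-loadtester.test/
      timeout: 15s
      metadata:
        type: cmd
        # hypothetical health endpoint, assumes curl is present in the tester image
        cmd: "curl -sf http://podinfo-canary.test:9898/healthz"
```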
|
||||
|
||||
### Multiple ports
|
||||
|
||||
**My application listens on multiple ports, how can I expose them inside the cluster?**
|
||||
|
||||
If port discovery is enabled, Flagger scans the deployment spec and extracts the container
ports, excluding the port specified in the canary service and the Envoy sidecar ports.
These ports will be used when generating the ClusterIP services.
|
||||
|
||||
For a deployment that exposes two ports:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9899"
|
||||
spec:
|
||||
containers:
|
||||
- name: app
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
- containerPort: 9090
|
||||
```
|
||||
|
||||
You can enable port discovery so that Prometheus will be able to reach port `9090` over mTLS:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
spec:
|
||||
service:
|
||||
# container port used for canary analysis
|
||||
port: 8080
|
||||
# port name can be http or grpc (default http)
|
||||
portName: http
|
||||
# add all the other container ports
|
||||
# to the ClusterIP services (default false)
|
||||
portDiscovery: true
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: ISTIO_MUTUAL
|
||||
```
|
||||
|
||||
Both port `8080` and `9090` will be added to the ClusterIP services but the virtual service
|
||||
will point to the port specified in `spec.service.port`.
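For the example above, the generated canary service would look roughly like the sketch below; the name given to the discovered port is an assumption, only the shape of the result is meant to be illustrated:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: podinfo-canary
spec:
  type: ClusterIP
  selector:
    app: podinfo
  ports:
    # the port used for canary analysis (spec.service.port)
    - name: http
      port: 8080
      protocol: TCP
      targetPort: http
    # extra container port added by port discovery (name is hypothetical)
    - name: tcp-9090
      port: 9090
      protocol: TCP
      targetPort: 9090
```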
|
||||
|
||||
### Label selectors
|
||||
|
||||
**What label selectors are supported by Flagger?**
|
||||
|
||||
The target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
```
|
||||
|
||||
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
|
||||
convention you can specify your label with the `-selector-labels` flag.
|
||||
|
||||
**Are pod affinity and anti-affinity supported?**
|
||||
|
||||
For pod affinity to work you need to use a label other than `app`, `name` or `app.kubernetes.io/name`.
|
||||
|
||||
Anti affinity example:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
affinity: podinfo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: podinfo
|
||||
affinity: podinfo
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
affinity: podinfo
|
||||
topologyKey: kubernetes.io/hostname
|
||||
```
|
||||
|
||||
### Istio Ingress Gateway
|
||||
|
||||
**How can I expose multiple canaries on the same external domain?**
|
||||
|
||||
Assuming you have two apps, one that serves the main website and one that serves the REST API.
|
||||
For each app you can define a canary object as:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: website
|
||||
spec:
|
||||
service:
|
||||
port: 8080
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
hosts:
|
||||
- my-site.com
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
---
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: webapi
|
||||
spec:
|
||||
service:
|
||||
port: 8080
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
hosts:
|
||||
- my-site.com
|
||||
match:
|
||||
- uri:
|
||||
prefix: /api
|
||||
rewrite:
|
||||
uri: /
|
||||
```
|
||||
|
||||
Based on the above configuration, Flagger will create two virtual services bound to the same ingress gateway and external host.
|
||||
Istio Pilot will [merge](https://istio.io/help/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host)
|
||||
the two services and the website rule will be moved to the end of the list in the merged configuration.
|
||||
|
||||
Note that host merging only works if the canaries are bound to an ingress gateway other than the `mesh` gateway.
|
||||
|
||||
### Istio Mutual TLS
|
||||
|
||||
**How can I enable mTLS for a canary?**
|
||||
|
||||
When deploying Istio with global mTLS enabled, you have to set the TLS mode to `ISTIO_MUTUAL`:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
spec:
|
||||
service:
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: ISTIO_MUTUAL
|
||||
```
|
||||
|
||||
If you run Istio in permissive mode you can disable TLS:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
spec:
|
||||
service:
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
```
|
||||
|
||||
**If Flagger is outside of the mesh, how can it start the load test?**
|
||||
|
||||
In order for Flagger to be able to call the load tester service from outside the mesh, you need to disable mTLS on port 80:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: test
|
||||
spec:
|
||||
host: "flagger-loadtester.test.svc.cluster.local"
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
---
|
||||
apiVersion: authentication.istio.io/v1alpha1
|
||||
kind: Policy
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: test
|
||||
spec:
|
||||
targets:
|
||||
- name: flagger-loadtester
|
||||
ports:
|
||||
- number: 80
|
||||
```
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
[Flagger](https://github.com/weaveworks/flagger) takes a Kubernetes deployment and optionally
|
||||
a horizontal pod autoscaler \(HPA\) and creates a series of objects
|
||||
\(Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services\) to drive the canary analysis and promotion.
|
||||
\(Kubernetes deployments, ClusterIP services, virtual service, traffic split or ingress\) to drive the canary analysis and promotion.
|
||||
|
||||

|
||||
|
||||
@@ -17,6 +17,10 @@ metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider (optional)
|
||||
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: istio
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
@@ -38,7 +42,6 @@ spec:
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- podinfo.example.com
|
||||
@@ -102,11 +105,59 @@ convention you can specify your label with the `-selector-labels` flag.
|
||||
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
|
||||
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
|
||||
|
||||
### Canary status
|
||||
|
||||
Get the current status of canary deployments cluster wide:
|
||||
|
||||
```bash
|
||||
kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-06-30T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-06-30T16:15:07Z
|
||||
prod backend Failed 0 2019-06-30T17:05:07Z
|
||||
```
|
||||
|
||||
The status condition reflects the last known state of the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl -n test get canary/podinfo -oyaml | awk '/status/,0'
|
||||
```
|
||||
|
||||
A successful rollout status:
|
||||
|
||||
```yaml
|
||||
status:
|
||||
canaryWeight: 0
|
||||
failedChecks: 0
|
||||
iterations: 0
|
||||
lastAppliedSpec: "14788816656920327485"
|
||||
lastPromotedSpec: "14788816656920327485"
|
||||
conditions:
|
||||
- lastTransitionTime: "2019-07-10T08:23:18Z"
|
||||
lastUpdateTime: "2019-07-10T08:23:18Z"
|
||||
message: Canary analysis completed successfully, promotion finished.
|
||||
reason: Succeeded
|
||||
status: "True"
|
||||
type: Promoted
|
||||
```
|
||||
|
||||
The `Promoted` status condition can have one of the following reasons:
|
||||
Initialized, Waiting, Progressing, Finalising, Succeeded or Failed.
|
||||
A failed canary will have the promoted status set to `false`,
|
||||
the reason set to `failed`, and the last applied spec will differ from the last promoted one.
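For comparison, a failed rollout status could look roughly like this (a sketch; the exact message and spec hashes will differ):

```yaml
status:
  canaryWeight: 0
  failedChecks: 10
  iterations: 0
  lastAppliedSpec: "14788816656920327485"
  lastPromotedSpec: "7b4cc7e9f21a3d8e"
  conditions:
  - lastTransitionTime: "2019-07-10T08:23:18Z"
    lastUpdateTime: "2019-07-10T08:23:18Z"
    message: Canary analysis failed, deployment scaled to zero.
    reason: Failed
    status: "False"
    type: Promoted
```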
|
||||
|
||||
Wait for a successful rollout:
|
||||
|
||||
```bash
|
||||
kubectl wait canary/podinfo --for=condition=promoted
|
||||
```
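This makes it easy to gate a delivery pipeline on the canary result, for example (a sketch; the image tag and timeout are placeholders):

```bash
# trigger the canary by updating the container image
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1

# block the pipeline until the canary is promoted or the timeout expires
kubectl -n test wait canary/podinfo --for=condition=promoted --timeout=15m
```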
|
||||
|
||||
### Istio routing
|
||||
|
||||
Flagger creates an Istio Virtual Service based on the Canary service spec. The service configuration lets you expose
|
||||
an app inside or outside the mesh.
|
||||
You can also define HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
|
||||
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
|
||||
The service configuration lets you expose an app inside or outside the mesh.
|
||||
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
|
||||
|
||||
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
|
||||
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.
|
||||
@@ -130,6 +181,10 @@ spec:
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- frontend.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
simple: LEAST_CONN
|
||||
# HTTP match conditions (optional)
|
||||
match:
|
||||
- uri:
|
||||
@@ -199,18 +254,40 @@ spec:
|
||||
route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 100
|
||||
- destination:
|
||||
host: podinfo-canary
|
||||
port:
|
||||
number: 9898
|
||||
weight: 0
|
||||
```
|
||||
|
||||
Flagger keeps in sync the virtual service with the canary service spec. Any direct modification to the virtual
|
||||
service spec will be overwritten.
|
||||
For each destination in the virtual service a rule is generated:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-primary
|
||||
namespace: test
|
||||
spec:
|
||||
host: frontend-primary
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
simple: LEAST_CONN
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-canary
|
||||
namespace: test
|
||||
spec:
|
||||
host: frontend-canary
|
||||
trafficPolicy:
|
||||
loadBalancer:
|
||||
simple: LEAST_CONN
|
||||
```
|
||||
|
||||
Flagger keeps the virtual service and destination rules in sync with the canary service spec.
|
||||
Any direct modification to the virtual service spec will be overwritten.
|
||||
|
||||
To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
|
||||
the service spec can contain only the container port:
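A minimal sketch of such a spec (the `targetRef` and analysis fields are omitted for brevity, the port is assumed to match the container port):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: backend
  namespace: test
spec:
  service:
    # container port, no gateways or hosts needed for mesh-only access
    port: 9898
```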
|
||||
@@ -579,6 +656,8 @@ The canary analysis can be extended with webhooks. Flagger will call each webhoo
|
||||
determine from the response status code (HTTP 2xx) if the canary is failing or not.
|
||||
|
||||
There are three types of hooks:
|
||||
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
|
||||
The rollout is paused until the hook returns a successful HTTP status code.
|
||||
* Pre-rollout hooks are executed before routing traffic to canary.
|
||||
The canary advancement is paused if a pre-rollout hook fails and, if the number of failures reaches the
threshold, the canary will be rolled back.
|
||||
@@ -592,13 +671,16 @@ Spec:
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "start gate"
|
||||
type: confirm-rollout
|
||||
url: http://flagger-loadtester.test/gate/approve
|
||||
- name: "smoke test"
|
||||
type: pre-rollout
|
||||
url: http://migration-check.db/query
|
||||
timeout: 30s
|
||||
url: http://flagger-helmtester.kube-system/
|
||||
timeout: 3m
|
||||
metadata:
|
||||
key1: "val1"
|
||||
key2: "val2"
|
||||
type: "helm"
|
||||
cmd: "test podinfo --cleanup"
|
||||
- name: "load test"
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
@@ -641,14 +723,14 @@ On a non-2xx response Flagger will include the response body (if any) in the fai
|
||||
For workloads that are not receiving constant traffic Flagger can be configured with a webhook
that, when called, will start a load test for the target workload.
|
||||
If the target workload doesn't receive any traffic during the canary analysis,
|
||||
Flagger metric checks will fail with "no values found for metric istio_requests_total".
|
||||
Flagger metric checks will fail with "no values found for metric request-success-rate".
|
||||
|
||||
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
|
||||
that generates traffic during analysis when configured as a webhook.
|
||||
|
||||

|
||||
|
||||
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
|
||||
First you need to deploy the load test runner in a namespace with sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
@@ -678,20 +760,20 @@ webhooks:
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
- name: load-test-post
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
|
||||
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo-canary.test:9898/echo"
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
|
||||
in the background, if they are not already running. This will ensure that during the
|
||||
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.
|
||||
analysis, the `podinfo-canary.test` service will receive a steady stream of GET and POST requests.
|
||||
|
||||
If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
|
||||
If your workload is exposed outside the mesh you can point `hey` to the
|
||||
public URL and use HTTP2.
|
||||
|
||||
```yaml
|
||||
@@ -704,11 +786,23 @@ webhooks:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
|
||||
```
|
||||
|
||||
For gRPC services you can use [bojand/ghz](https://github.com/bojand/ghz) which is a similar tool to `hey` but for gRPC:
|
||||
|
||||
```yaml
|
||||
webhooks:
|
||||
- name: grpc-load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
|
||||
```
|
||||
|
||||
The load tester can run arbitrary commands as long as the binary is present in the container image.
|
||||
For example, if you want to replace `hey` with another CLI, you can create your own Docker image:
|
||||
|
||||
```dockerfile
|
||||
FROM quay.io/stefanprodan/flagger-loadtester:<VER>
|
||||
FROM weaveworks/flagger-loadtester:<VER>
|
||||
|
||||
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
|
||||
&& chmod +x /usr/local/bin/my-cli
|
||||
@@ -742,3 +836,115 @@ webhooks:
|
||||
When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
|
||||
to the nGrinder server and start a new performance test. The load tester will periodically poll the nGrinder server
|
||||
for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops.
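A hypothetical webhook configuration for this integration could look like the sketch below; the metadata field names are assumptions and should be verified against the load tester documentation for your version:

```yaml
canaryAnalysis:
  webhooks:
    - name: "ngrinder load test"
      type: rollout
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        # hypothetical field names, verify against the load tester docs
        type: ngrinder
        # nGrinder server address
        server: http://ngrinder-server:8080
        # id of the performance test to clone and start
        clone: "100"
        # credentials for the nGrinder REST API
        username: admin
        passwd: YWRtaW4=
```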
|
||||
|
||||
### Integration Testing
|
||||
|
||||
Flagger comes with a testing service that can run Helm tests or Bats tests when configured as a webhook.
|
||||
|
||||
Deploy the Helm test runner in the `kube-system` namespace using the `tiller` service account:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger-helmtester flagger/loadtester \
|
||||
--namespace=kube-system \
|
||||
--set serviceAccountName=tiller
|
||||
```
|
||||
|
||||
When deployed the Helm tester API will be available at `http://flagger-helmtester.kube-system/`.
|
||||
|
||||
Now you can add pre-rollout webhooks to the canary analysis spec:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "smoke test"
|
||||
type: pre-rollout
|
||||
url: http://flagger-helmtester.kube-system/
|
||||
timeout: 3m
|
||||
metadata:
|
||||
type: "helm"
|
||||
cmd: "test {{ .Release.Name }} --cleanup"
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
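A Helm test is just a pod template annotated with the test hook, for example (a minimal sketch; the service name and probe URL are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: "{{ .Release.Name }}-smoke-test"
  annotations:
    "helm.sh/hook": test-success
spec:
  restartPolicy: Never
  containers:
    - name: curl
      image: curlimages/curl
      # placeholder URL, point it at your release's service
      command: ["curl", "-sf", "http://{{ .Release.Name }}:9898/healthz"]
```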
|
||||
|
||||
As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "acceptance tests"
|
||||
type: pre-rollout
|
||||
url: http://flagger-batstester.default/
|
||||
timeout: 5m
|
||||
metadata:
|
||||
type: "bash"
|
||||
cmd: "bats /tests/acceptance.bats"
|
||||
```
|
||||
|
||||
Note that you should create a ConfigMap with your Bats tests and mount it inside the tester container.
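A minimal sketch of how that could be wired up, assuming `curl` is available in the tester image and the tests are mounted at `/tests` (the ConfigMap name and the volume mount shown as comments are assumptions):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: bats-acceptance-tests
  namespace: default
data:
  acceptance.bats: |
    #!/usr/bin/env bats

    @test "canary responds with HTTP 200" {
      run curl -s -o /dev/null -w "%{http_code}" http://podinfo-canary.test:9898/
      [ "$output" = "200" ]
    }
# volume snippet for the tester deployment (hypothetical):
#   volumes:
#     - name: tests
#       configMap:
#         name: bats-acceptance-tests
#   containers:
#     - name: loadtester
#       volumeMounts:
#         - name: tests
#           mountPath: /tests
```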
|
||||
|
||||
### Manual Gating
|
||||
|
||||
For manual approval of a canary deployment you can use the `confirm-rollout` webhook.
|
||||
The confirmation hooks are executed before the pre-rollout hooks.
|
||||
Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.
|
||||
|
||||
Manual gating with Flagger's tester:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "gate"
|
||||
type: confirm-rollout
|
||||
url: http://flagger-loadtester.test/gate/halt
|
||||
```
|
||||
|
||||
The `/gate/halt` endpoint returns HTTP 403, thus blocking the rollout.
|
||||
|
||||
If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary rollout is waiting for approval.
|
||||
|
||||
Change the URL to `/gate/approve` to start the canary analysis:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "gate"
|
||||
type: confirm-rollout
|
||||
url: http://flagger-loadtester.test/gate/approve
|
||||
```
|
||||
|
||||
Manual gating can be driven with Flagger's tester API. Set the confirmation URL to `/gate/check`:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "ask for confirmation"
|
||||
type: confirm-rollout
|
||||
url: http://flagger-loadtester.test/gate/check
|
||||
```
|
||||
|
||||
By default the gate is closed. You can start or resume the canary rollout with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
|
||||
|
||||
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
|
||||
```
|
||||
|
||||
You can pause the rollout at any time with:
|
||||
|
||||
```bash
|
||||
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/close
|
||||
```
|
||||
|
||||
If a canary analysis is paused the status will change to waiting:
|
||||
|
||||
```bash
|
||||
kubectl get canary/podinfo
|
||||
|
||||
NAME STATUS WEIGHT
|
||||
podinfo Waiting 0
|
||||
```
|
||||
|
||||
@@ -133,11 +133,18 @@ Add Flagger Helm repository:
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=appmesh-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=appmesh \
|
||||
--set prometheus.install=true
|
||||
```
|
||||
@@ -150,6 +157,7 @@ You can enable **Slack** notifications with:
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=appmesh-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=appmesh \
|
||||
--set metricsServer=http://prometheus.appmesh:9090 \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
@@ -163,7 +171,7 @@ Deploy Grafana in the _**appmesh-system**_ namespace:
|
||||
```bash
|
||||
helm upgrade -i flagger-grafana flagger/grafana \
|
||||
--namespace=appmesh-system \
|
||||
--set url=http://prometheus.appmesh-system:9090
|
||||
--set url=http://flagger-prometheus.appmesh-system:9090
|
||||
```
|
||||
|
||||
You can access Grafana using port forwarding:
|
||||
|
||||
@@ -333,10 +333,17 @@ The GKE Istio add-on does not include a Prometheus instance that scrapes the Ist
|
||||
Because Flagger uses the Istio HTTP metrics to run the canary analysis you have to deploy the following
|
||||
Prometheus configuration that's similar to the one that comes with the official Istio Helm chart.
|
||||
|
||||
```bash
|
||||
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
Find the GKE Istio version with:
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/gke/istio-prometheus.yaml
|
||||
```bash
|
||||
kubectl -n istio-system get deploy istio-pilot -oyaml | grep image:
|
||||
```
|
||||
|
||||
Install Prometheus in istio-system namespace (replace `1.0.6-gke.3` with your version):
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system apply -f \
|
||||
https://storage.googleapis.com/gke-release/istio/release/1.0.6-gke.3/patches/install-prometheus.yaml
|
||||
```
|
||||
|
||||
### Install Flagger and Grafana
|
||||
@@ -347,11 +354,18 @@ Add Flagger Helm repository:
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger in the `istio-system` namespace with Slack notifications enabled:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set metricsServer=http://prometheus.istio-system:9090 \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Flagger install on Kubernetes
|
||||
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster.
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster with Helm or Kustomize.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
@@ -9,18 +9,7 @@ Flagger requires a Kubernetes cluster **v1.11** or newer with the following admi
|
||||
* MutatingAdmissionWebhook
|
||||
* ValidatingAdmissionWebhook
|
||||
|
||||
Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
|
||||
with traffic management, telemetry and Prometheus enabled.
|
||||
|
||||
A minimal Istio installation should contain the following services:
|
||||
|
||||
* istio-pilot
|
||||
* istio-ingressgateway
|
||||
* istio-sidecar-injector
|
||||
* istio-telemetry
|
||||
* prometheus
|
||||
|
||||
### Install Flagger
|
||||
### Install Flagger with Helm
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
@@ -28,26 +17,54 @@ Add Flagger Helm repository:
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Deploy Flagger in the _**istio-system**_ namespace:
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger for Istio:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set metricsServer=http://prometheus.istio-system:9090
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://prometheus:9090
|
||||
```
|
||||
|
||||
You can install Flagger in any namespace as long as it can talk to the Istio Prometheus service on port 9090.
|
||||
Deploy Flagger for Linkerd:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=linkerd \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=linkerd \
|
||||
--set metricsServer=http://linkerd-prometheus:9090
|
||||
```
|
||||
|
||||
You can install Flagger in any namespace as long as it can talk to the Prometheus service on port 9090.
|
||||
|
||||
Enable **Slack** notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
Enable **Microsoft Teams** notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set msteams.url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
|
||||
```
|
||||
|
||||
If you don't have Tiller you can use the helm template command and apply the generated yaml with kubectl:
|
||||
|
||||
```bash
|
||||
@@ -81,7 +98,7 @@ If you want to remove all the objects created by Flagger you have delete the Can
|
||||
kubectl delete crd canaries.flagger.app
|
||||
```
|
||||
|
||||
### Install Grafana
|
||||
### Install Grafana with Helm
|
||||
|
||||
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
|
||||
|
||||
@@ -115,31 +132,109 @@ You can access Grafana using port forwarding:
|
||||
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
|
||||
```
|
||||
|
||||
### Install Load Tester
|
||||
### Install Flagger with Kustomize
|
||||
|
||||
Flagger comes with an optional load testing service that generates traffic
|
||||
during canary analysis when configured as a webhook.
|
||||
As an alternative to Helm, Flagger can be installed with Kustomize.
|
||||
|
||||
Deploy the load test runner with Helm:
|
||||
**Service mesh specific installers**
|
||||
|
||||
Install Flagger for Istio:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test \
|
||||
--set cmd.timeout=1h
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
|
||||
```
|
||||
|
||||
Deploy with kubectl:
|
||||
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to Istio's Prometheus instance.
|
||||
|
||||
Note that you'll need kubectl 1.14 to run the above command, or you can download the
|
||||
[kustomize binary](https://github.com/kubernetes-sigs/kustomize/releases) and run:
|
||||
|
||||
```bash
|
||||
helm fetch --untar --untardir . flagger/loadtester &&
|
||||
helm template loadtester \
|
||||
--name flagger-loadtester \
|
||||
--namespace=test
|
||||
> $HOME/flagger-loadtester.yaml
|
||||
|
||||
# apply
|
||||
kubectl apply -f $HOME/flagger-loadtester.yaml
|
||||
kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
|
||||
```
|
||||
|
||||
> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.
|
||||
Install Flagger for Linkerd:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
|
||||
```
|
||||
|
||||
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
|
||||
|
||||
If you want to install a specific Flagger release, add the version number to the URL:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
|
||||
```
|
||||
|
||||
**Generic installer**
|
||||
|
||||
Install Flagger and Prometheus:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/kubernetes
|
||||
```
|
||||
|
||||
This deploys Flagger and Prometheus in the `flagger-system` namespace,
|
||||
sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` and the mesh provider to `kubernetes`.
|
||||
|
||||
The Prometheus instance retains data for two hours and is configured to scrape all pods in your cluster that
|
||||
have the `prometheus.io/scrape: "true"` annotation.
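To have your workload's metrics collected by this Prometheus instance, add the scrape annotations to the pod template, for example (a partial sketch; the metrics port is a placeholder):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app
  namespace: test
spec:
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        # port serving the /metrics endpoint (placeholder)
        prometheus.io/port: "9797"
```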
|
||||
|
||||
To target a different provider you can specify it in the canary custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: app
|
||||
namespace: test
|
||||
spec:
|
||||
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: nginx
|
||||
```
|
||||
|
||||
**Customized installer**
|
||||
|
||||
Create a kustomization file using flagger as base:
|
||||
|
||||
```bash
|
||||
cat > kustomization.yaml <<EOF
|
||||
namespace: istio-system
|
||||
bases:
|
||||
- github.com/weaveworks/flagger/kustomize/base/flagger
|
||||
patchesStrategicMerge:
|
||||
- patch.yaml
|
||||
EOF
|
||||
```
|
||||
|
||||
Create a patch and enable Slack notifications by setting the slack channel and hook URL:
|
||||
|
||||
```bash
|
||||
cat > patch.yaml <<EOF
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: flagger
|
||||
args:
|
||||
- -mesh-provider=istio
|
||||
- -metrics-server=http://prometheus.istio-system:9090
|
||||
- -slack-user=flagger
|
||||
- -slack-channel=alerts
|
||||
- -slack-url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
|
||||
EOF
|
||||
```
|
||||
|
||||
Install Flagger with Slack:
|
||||
|
||||
```bash
|
||||
kubectl apply -k .
|
||||
```
|
||||
|
||||
If you want to use MS Teams instead of Slack, replace `-slack-url` with `-msteams-url` and set the webhook address to `https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK`.
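For example, the same patch adapted for MS Teams could look like this (the webhook address is a placeholder):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -mesh-provider=istio
            - -metrics-server=http://prometheus.istio-system:9090
            # MS Teams incoming webhook (placeholder address)
            - -msteams-url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
```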
|
||||
|
||||
@@ -115,11 +115,18 @@ Add Flagger Helm repository:
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set metricsServer=http://prometheus.istio-system:9090 \
|
||||
--set meshProvider=supergloo:istio.supergloo-system
|
||||
```
|
||||
|
||||
@@ -18,7 +18,11 @@ and the canary configuration file.
|
||||
│ ├── canary.yaml
|
||||
│ ├── configmap.yaml
|
||||
│ ├── deployment.yaml
|
||||
│ └── hpa.yaml
|
||||
│ ├── hpa.yaml
|
||||
│ ├── service.yaml
|
||||
│ └── tests
|
||||
│ ├── test-config.yaml
|
||||
│ └── test-pod.yaml
|
||||
└── values.yaml
|
||||
```
|
||||
|
||||
@@ -47,7 +51,7 @@ helm upgrade -i frontend flagger/podinfo \
|
||||
--namespace test \
|
||||
--set nameOverride=frontend \
|
||||
--set backend=http://backend.test:9898/echo \
|
||||
--set canary.enabled=true \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set canary.istioIngress.enabled=true \
|
||||
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
|
||||
--set canary.istioIngress.host=frontend.istio.example.com
|
||||
@@ -87,7 +91,7 @@ Now let's install the `backend` release without exposing it outside the mesh:
|
||||
helm upgrade -i backend flagger/podinfo \
|
||||
--namespace test \
|
||||
--set nameOverride=backend \
|
||||
--set canary.enabled=true \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set canary.istioIngress.enabled=false
|
||||
```
|
||||
|
||||
@@ -118,17 +122,26 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Enable the load tester and deploy a new `frontend` version:
|
||||
Install Flagger's Helm test runner in the `kube-system` namespace using the `tiller` service account:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-helmtester flagger/loadtester \
|
||||
--namespace=kube-system \
|
||||
--set serviceAccountName=tiller
|
||||
```
|
||||
|
||||
Enable the load and helm tester and deploy a new `frontend` version:
|
||||
|
||||
```bash
|
||||
helm upgrade -i frontend flagger/podinfo/ \
|
||||
--namespace test \
|
||||
--reuse-values \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set image.tag=1.4.1
|
||||
--set canary.helmtest.enabled=true \
|
||||
--set image.tag=1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the canary analysis along with the load test:
|
||||
Flagger detects that the deployment revision changed and starts the canary analysis:
|
||||
|
||||
```
|
||||
kubectl -n istio-system logs deployment/flagger -f | jq .msg
|
||||
@@ -136,6 +149,7 @@ kubectl -n istio-system logs deployment/flagger -f | jq .msg
|
||||
New revision detected! Scaling up frontend.test
|
||||
Halt advancement frontend.test waiting for rollout to finish: 0 of 2 updated replicas are available
|
||||
Starting canary analysis for frontend.test
|
||||
Pre-rollout check helm test passed
|
||||
Advance frontend.test canary weight 5
|
||||
Advance frontend.test canary weight 10
|
||||
Advance frontend.test canary weight 15
|
||||
@@ -163,7 +177,7 @@ Now trigger a canary deployment for the `backend` app, but this time you'll chan
|
||||
helm upgrade -i backend flagger/podinfo/ \
|
||||
--namespace test \
|
||||
--reuse-values \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set canary.helmtest.enabled=true \
|
||||
--set httpServer.timeout=25s
|
||||
```
|
||||
|
||||
@@ -253,7 +267,8 @@ Create a git repository with the following content:
|
||||
└── test
|
||||
├── backend.yaml
|
||||
├── frontend.yaml
|
||||
└── loadtester.yaml
|
||||
├── loadtester.yaml
|
||||
└── helmtester.yaml
|
||||
```
|
||||
|
||||
You can find the git source [here](https://github.com/stefanprodan/flagger/tree/master/artifacts/cluster).
|
||||
@@ -268,17 +283,17 @@ metadata:
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: semver:~1.4
|
||||
flux.weave.works/tag.chart-image: semver:~1.7
|
||||
spec:
|
||||
releaseName: frontend
|
||||
chart:
|
||||
repository: https://stefanprodan.github.io/flagger/
|
||||
name: podinfo
|
||||
version: 2.0.0
|
||||
version: 2.3.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.4.0
|
||||
tag: 1.7.0
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
canary:
|
||||
enabled: true
|
||||
@@ -288,13 +303,15 @@ spec:
|
||||
host: frontend.istio.example.com
|
||||
loadtest:
|
||||
enabled: true
|
||||
helmtest:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
In the `chart` section I've defined the release source by specifying the Helm repository (hosted on GitHub Pages), chart name and version.
|
||||
In the `values` section I've overwritten the defaults set in values.yaml.
|
||||
|
||||
With the `flux.weave.works` annotations I instruct Flux to automate this release.
|
||||
When an image tag in the sem ver range of `1.4.0 - 1.4.99` is pushed to Quay,
|
||||
When an image tag in the semver range of `1.7.0 - 1.7.99` is pushed to Quay,
|
||||
Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
|
||||
|
||||
Install [Weave Flux](https://github.com/weaveworks/flux) and its Helm Operator by specifying your Git repo URL:
|
||||
@@ -304,6 +321,7 @@ helm repo add weaveworks https://weaveworks.github.io/flux
|
||||
|
||||
helm install --name flux \
|
||||
--set helmOperator.create=true \
|
||||
--set helmOperator.createCRD=true \
|
||||
--set git.url=git@github.com:<USERNAME>/<REPOSITORY> \
|
||||
--namespace flux \
|
||||
weaveworks/flux
|
||||
@@ -326,28 +344,30 @@ launch the `frontend` and `backend` apps.
|
||||
|
||||
A CI/CD pipeline for the `frontend` release could look like this:
|
||||
|
||||
* cut a release from the master branch of the podinfo code repo with the git tag `1.4.1`
|
||||
* CI builds the image and pushes the `podinfo:1.4.1` image to the container registry
|
||||
* Flux scans the registry and updates the Helm release `image.tag` to `1.4.1`
|
||||
* cut a release from the master branch of the podinfo code repo with the git tag `1.7.1`
|
||||
* CI builds the image and pushes the `podinfo:1.7.1` image to the container registry
|
||||
* Flux scans the registry and updates the Helm release `image.tag` to `1.7.1`
|
||||
* Flux commits and pushes the change to the cluster repo
|
||||
* Flux applies the updated Helm release on the cluster
|
||||
* Flux Helm Operator picks up the change and calls Tiller to upgrade the release
|
||||
* Flagger detects a revision change and scales up the `frontend` deployment
|
||||
* Flagger runs the helm test before routing traffic to the canary service
|
||||
* Flagger starts the load test and runs the canary analysis
|
||||
* Based on the analysis result the canary deployment is promoted to production or rolled back
|
||||
* Flagger sends a Slack notification with the canary result
|
||||
|
||||
If the canary fails, fix the bug, do another patch release eg `1.4.2` and the whole process will run again.
|
||||
If the canary fails, fix the bug, do another patch release eg `1.7.2` and the whole process will run again.
|
||||
|
||||
A canary deployment can fail due to any of the following reasons:
|
||||
|
||||
* the container image can't be downloaded
|
||||
* the deployment replica set is stuck for more than ten minutes (e.g. due to a container crash loop)
|
||||
* the webhooks (acceptance tests, load tests, etc) are returning a non 2xx response
|
||||
* the webhooks (acceptance tests, helm tests, load tests, etc) are returning a non 2xx response
|
||||
* the HTTP success rate (non 5xx responses) metric drops under the threshold
|
||||
* the HTTP average duration metric goes over the threshold
|
||||
* the Istio telemetry service is unable to collect traffic metrics
|
||||
* the metrics server (Prometheus) can't be reached
|
||||
|
||||
If you want to find out more about managing Helm releases with Flux here is an in-depth guide
|
||||
[github.com/stefanprodan/gitops-helm](https://github.com/stefanprodan/gitops-helm).
|
||||
If you want to find out more about managing Helm releases with Flux here are two in-depth guides:
|
||||
[gitops-helm](https://github.com/stefanprodan/gitops-helm) and
|
||||
[gitops-istio](https://github.com/stefanprodan/gitops-istio).
|
||||
|
||||
332
docs/gitbook/tutorials/flagger-smi-istio.md
Normal file
@@ -0,0 +1,332 @@
|
||||
# Flagger SMI
|
||||
|
||||
This guide shows you how to use the SMI Istio adapter and Flagger to automate canary deployments.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
|
||||
|
||||
* MutatingAdmissionWebhook
|
||||
* ValidatingAdmissionWebhook
|
||||
|
||||
Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
|
||||
with traffic management, telemetry and Prometheus enabled.
|
||||
|
||||
A minimal Istio installation should contain the following services:
|
||||
|
||||
* istio-pilot
|
||||
* istio-ingressgateway
|
||||
* istio-sidecar-injector
|
||||
* istio-telemetry
|
||||
* prometheus
|
||||
|
||||
### Install Istio and the SMI adapter
|
||||
|
||||
Add Istio Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
|
||||
```
|
||||
|
||||
Install Istio CRDs:
|
||||
|
||||
```bash
|
||||
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system
|
||||
|
||||
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
|
||||
```
|
||||
|
||||
Install Istio:
|
||||
|
||||
```bash
|
||||
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
|
||||
```
|
||||
|
||||
Create a generic Istio gateway to expose services outside the mesh on HTTP:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: public-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
```
|
||||
|
||||
Save the above resource as public-gateway.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./public-gateway.yaml
|
||||
```
|
||||
|
||||
Find the Gateway load balancer IP and add a DNS record for it:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
|
||||
```
|
||||
|
||||
Install the SMI adapter:
|
||||
|
||||
```bash
|
||||
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
|
||||
```
|
||||
|
||||
### Install Flagger and Grafana
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Deploy Flagger in the _**istio-system**_ namespace:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set image.tag=master-12d84b2 \
|
||||
--set meshProvider=smi:istio
|
||||
```
|
||||
|
||||
Flagger comes with a Grafana dashboard made for monitoring the canary deployments.
|
||||
|
||||
Deploy Grafana in the _**istio-system**_ namespace:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-grafana flagger/grafana \
|
||||
--namespace=istio-system \
|
||||
--set url=http://prometheus.istio-system:9090
|
||||
```
|
||||
|
||||
You can access Grafana using port forwarding:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
|
||||
```
|
||||
|
||||
### Workloads bootstrap
|
||||
|
||||
Create a test namespace with Istio sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource (replace example.com with your own domain):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# generate traffic during analysis
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
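Once the bootstrap finishes, the canary should reach the Initialized phase. A quick way to check is shown below; the output is a sketch and the timestamp will differ in your cluster:

```bash
kubectl -n test get canary podinfo

NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
podinfo   Initialized   0        2019-05-16T08:09:51Z
```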
|
||||
|
||||
### Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
|
||||
like HTTP requests success rate, requests average duration and pod health.
|
||||
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n istio-system logs deployment/flagger -f | jq .msg
|
||||
|
||||
|
||||
New revision detected podinfo.test
|
||||
Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
During the analysis the canary’s progress can be monitored with Grafana. The Istio dashboard URL is
|
||||
http://localhost:3000/d/flagger-istio/istio-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-05-16T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-05-15T16:15:07Z
|
||||
prod backend Failed 0 2019-05-14T17:05:07Z
|
||||
```
|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Create a tester pod and exec into it:
|
||||
|
||||
```bash
|
||||
kubectl -n test run tester \
|
||||
--image=quay.io/stefanprodan/podinfo:1.2.1 \
|
||||
-- ./podinfo --port=9898
|
||||
|
||||
kubectl -n test exec -it tester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger Starting canary deployment for podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
|
||||
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
@@ -60,7 +60,6 @@ spec:
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
@@ -121,6 +120,8 @@ horizontalpodautoscaler.autoscaling/abtest-primary
|
||||
service/abtest
|
||||
service/abtest-canary
|
||||
service/abtest-primary
|
||||
destinationrule.networking.istio.io/abtest-canary
|
||||
destinationrule.networking.istio.io/abtest-primary
|
||||
virtualservice.networking.istio.io/abtest
|
||||
```
|
||||
|
||||
@@ -130,7 +131,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/abtest \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.1
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
@@ -6,7 +6,6 @@ Flagger can be configured to send Slack notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
@@ -22,6 +21,23 @@ maximum number of failed checks:
|
||||
|
||||

|
||||
|
||||
### Microsoft Teams
|
||||
|
||||
Flagger can be configured to send notifications to Microsoft Teams:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--set msteams.url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
|
||||
```
|
||||
|
||||
Flagger will post a message card to MS Teams when a new revision has been detected and if the canary analysis failed or succeeded:
|
||||
|
||||

|
||||
|
||||
And you'll get a notification on rollback:
|
||||
|
||||

|
||||
|
||||
### Prometheus Alert Manager
|
||||
|
||||
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment failed:
|
||||
|
||||
@@ -176,7 +176,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.1
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
356
docs/gitbook/usage/blue-green.md
Normal file
@@ -0,0 +1,356 @@
|
||||
# Blue/Green Deployments
|
||||
|
||||
This guide shows you how to automate Blue/Green deployments with Flagger and Kubernetes.
|
||||
|
||||
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
|
||||
with Kubernetes L4 networking.
|
||||
|
||||

|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer.
|
||||
|
||||
Install Flagger and the Prometheus add-on:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace flagger \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=kubernetes
|
||||
```
|
||||
|
||||
If you already have a Prometheus instance running in your cluster,
|
||||
you can point Flagger to the ClusterIP service with:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace flagger \
|
||||
--set metricsServer=http://prometheus.monitoring:9090
|
||||
```
|
||||
|
||||
Optionally you can enable Slack notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--reuse-values \
|
||||
--namespace flagger \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployment and ClusterIP services).
|
||||
These objects expose the application inside the cluster and drive the canary analysis and Blue/Green promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the analysis:
|
||||
|
||||
```bash
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider can be: kubernetes, istio, appmesh, nginx, gloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: kubernetes
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
portDiscovery: true
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
# max number of failed checks before rollback
|
||||
threshold: 2
|
||||
# number of checks to run before rollback
|
||||
iterations: 10
|
||||
# Prometheus checks based on
|
||||
# http_request_duration_seconds histogram
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# acceptance/load testing hooks
|
||||
webhooks:
|
||||
- name: smoke-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'anon' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
The above configuration will run the analysis for five minutes (ten iterations at a 30 second interval).
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
```
|
||||
|
||||
Blue/Green scenario:
|
||||
* on bootstrap, Flagger will create three ClusterIP services (`app-primary`, `app-canary`, `app`) and a shadow deployment named `app-primary` that represents the blue version
* when a new version is detected, Flagger will scale up the green version and run the conformance tests (the tests should target the `app-canary` ClusterIP service to reach the green version, see the verification sketch after this list)
* if the conformance tests pass, Flagger will start the load tests and validate them with custom Prometheus queries
* if the load test analysis is successful, Flagger will promote the new version to `app-primary` and scale down the green version
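To see which version each service is serving during an analysis, you can query the primary and canary ClusterIP services from inside the cluster; this is a minimal sketch that assumes podinfo reports its version in the response body of its root endpoint:

```bash
# exec into the load tester pod (replace xx-xx with the generated suffix)
kubectl -n test exec -it flagger-loadtester-xx-xx sh

# blue version, served by the primary ClusterIP service
curl -s http://podinfo-primary.test:9898/

# green version, served by the canary ClusterIP service while the analysis runs
curl -s http://podinfo-canary.test:9898/
```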
|
||||
|
||||
### Automated Blue/Green promotion
|
||||
|
||||
Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Events:
|
||||
|
||||
New revision detected podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 100 2019-06-16T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-06-15T16:15:07Z
|
||||
prod backend Failed 0 2019-06-14T17:05:07Z
|
||||
```
|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the analysis you can generate HTTP 500 errors and high latency to test Flagger's rollback.
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary.test:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary.test:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the analysis threshold,
|
||||
the green version is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Failed Checks: 2
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Warning Synced 2m flagger Rolling back podinfo.test failed checks threshold reached 2
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
### Custom metrics
|
||||
|
||||
The analysis can be extended with Prometheus queries. The demo app is instrumented with Prometheus
|
||||
so you can create a custom check that will use the HTTP request duration histogram to validate the canary (green version).
|
||||
|
||||
Edit the canary analysis and add the following metric:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
metrics:
|
||||
- name: "404s percentage"
|
||||
threshold: 5
|
||||
query: |
|
||||
100 - sum(
|
||||
rate(
|
||||
http_request_duration_seconds_count{
|
||||
kubernetes_namespace="test",
|
||||
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
|
||||
status!="404"
|
||||
}[1m]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
http_request_duration_seconds_count{
|
||||
kubernetes_namespace="test",
|
||||
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
|
||||
}[1m]
|
||||
)
|
||||
) * 100
|
||||
```
|
||||
|
||||
The above configuration validates the canary (green version) by checking if the HTTP 404 req/sec percentage is below 5
|
||||
percent of the total traffic. If the 404s rate reaches the 5% threshold, then the rollout is rolled back.
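Before wiring the query into the canary analysis, it helps to confirm that Prometheus is actually scraping the request duration histogram for the test namespace. The sketch below assumes the Prometheus add-on installed by the Flagger chart is exposed as a `flagger-prometheus` ClusterIP service in the `flagger` namespace; adjust the service name and namespace to match your setup:

```bash
# forward the Prometheus API to localhost
kubectl -n flagger port-forward svc/flagger-prometheus 9090:9090 &

# check that the request duration histogram is present for the test namespace
curl -s --get http://localhost:9090/api/v1/query \
  --data-urlencode 'query=sum(rate(http_request_duration_seconds_count{kubernetes_namespace="test"}[1m]))' | jq .
```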
|
||||
|
||||
Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary.test:9898/status/404
|
||||
```
|
||||
|
||||
Watch Flagger logs:
|
||||
|
||||
```
|
||||
kubectl -n flagger logs deployment/flagger -f | jq .msg
|
||||
|
||||
New revision detected podinfo.test
|
||||
Scaling up podinfo.test
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Halt podinfo.test advancement 404s percentage 6.20 > 5
|
||||
Halt podinfo.test advancement 404s percentage 6.45 > 5
|
||||
Rolling back podinfo.test failed checks threshold reached 2
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.
|
||||
|
||||
### Conformance Testing with Helm
|
||||
|
||||
Flagger comes with a testing service that can run Helm tests when configured as a pre-rollout webhook.
|
||||
|
||||
Deploy the Helm test runner in the `kube-system` namespace using the `tiller` service account:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger-helmtester flagger/loadtester \
|
||||
--namespace=kube-system \
|
||||
--set serviceAccountName=tiller
|
||||
```
|
||||
|
||||
When deployed, the Helm tester API will be available at `http://flagger-helmtester.kube-system/`.
|
||||
|
||||
Add a helm test pre-rollout hook to your chart:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
webhooks:
|
||||
- name: "conformance testing"
|
||||
type: pre-rollout
|
||||
url: http://flagger-helmtester.kube-system/
|
||||
timeout: 3m
|
||||
metadata:
|
||||
type: "helm"
|
||||
cmd: "test {{ .Release.Name }} --cleanup"
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks.
|
||||
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
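If the webhook keeps failing, it can help to run the same test suite by hand first; this is a plain Helm v2 command, replace `podinfo` with your release name:

```bash
# run the chart's test pods and remove them when done
helm test podinfo --cleanup
```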
|
||||
366
docs/gitbook/usage/gloo-progressive-delivery.md
Normal file
@@ -0,0 +1,366 @@
|
||||
# Gloo Canary Deployments
|
||||
|
||||
This guide shows you how to use the [Gloo](https://gloo.solo.io/) ingress controller and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer and Gloo ingress **0.13.29** or newer.
|
||||
|
||||
Install Gloo with Helm:
|
||||
|
||||
```bash
|
||||
helm repo add gloo https://storage.googleapis.com/solo-public-helm
|
||||
|
||||
helm upgrade -i gloo gloo/gloo \
|
||||
--namespace gloo-system
|
||||
```
|
||||
|
||||
Install Flagger and the Prometheus add-on in the same namespace as Gloo:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace gloo-system \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=gloo
|
||||
```
|
||||
|
||||
Optionally you can enable Slack notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--reuse-values \
|
||||
--namespace gloo-system \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services and Gloo upstream groups).
|
||||
These objects expose the application outside the cluster and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/gloo/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/gloo/hpa.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create a virtual service definition that references an upstream group that will be generated by Flagger
|
||||
(replace `app.example.com` with your own domain):
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.solo.io/v1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
virtualHost:
|
||||
domains:
|
||||
- 'app.example.com'
|
||||
name: podinfo.test
|
||||
routes:
|
||||
- matcher:
|
||||
prefix: /
|
||||
routeAction:
|
||||
upstreamGroup:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-virtualservice.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-virtualservice.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource (replace `app.example.com` with your own domain):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Gloo Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# load testing (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
virtualservices.gateway.solo.io/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
upstreamgroups.gloo.solo.io/podinfo
|
||||
```
|
||||
|
||||
When the bootstrap finishes, Flagger will set the canary status to Initialized:
|
||||
|
||||
```bash
|
||||
kubectl -n test get canary podinfo
|
||||
|
||||
NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
podinfo Initialized 0 2019-05-17T08:09:51Z
|
||||
```
|
||||
|
||||
### Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
|
||||
like HTTP requests success rate, requests average duration and pod health.
|
||||
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 25
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 35
|
||||
Normal Synced 55s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 45s flagger Advance podinfo.test canary weight 45
|
||||
Normal Synced 35s flagger Advance podinfo.test canary weight 50
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-05-17T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-05-17T16:15:07Z
|
||||
prod backend Failed 0 2019-05-17T17:05:07Z
|
||||
```
|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulty version.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.2
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/status/500
|
||||
```
|
||||
|
||||
Generate high latency:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/delay/2
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger Starting canary deployment for podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
|
||||
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
### Custom metrics
|
||||
|
||||
The canary analysis can be extended with Prometheus queries.
|
||||
|
||||
The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
|
||||
histogram to validate the canary.
|
||||
|
||||
Edit the canary analysis and add the following metric:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
metrics:
|
||||
- name: "404s percentage"
|
||||
threshold: 5
|
||||
query: |
|
||||
100 - sum(
|
||||
rate(
|
||||
http_request_duration_seconds_count{
|
||||
kubernetes_namespace="test",
|
||||
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
|
||||
status!="404"
|
||||
}[1m]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
http_request_duration_seconds_count{
|
||||
kubernetes_namespace="test",
|
||||
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
|
||||
}[1m]
|
||||
)
|
||||
) * 100
|
||||
```
|
||||
|
||||
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5
|
||||
percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/status/404
|
||||
```
|
||||
|
||||
Watch Flagger logs:
|
||||
|
||||
```
|
||||
kubectl -n gloo-system logs deployment/flagger -f | jq .msg
|
||||
|
||||
Starting canary deployment for podinfo.test
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement 404s percentage 6.20 > 5
|
||||
Halt podinfo.test advancement 404s percentage 6.45 > 5
|
||||
Halt podinfo.test advancement 404s percentage 7.60 > 5
|
||||
Halt podinfo.test advancement 404s percentage 8.69 > 5
|
||||
Halt podinfo.test advancement 404s percentage 9.70 > 5
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.
|
||||
|
||||
|
||||
471
docs/gitbook/usage/linkerd-progressive-delivery.md
Normal file
@@ -0,0 +1,471 @@
|
||||
# Linkerd Canary Deployments
|
||||
|
||||
This guide shows you how to use Linkerd and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer and Linkerd **2.4** or newer.
|
||||
|
||||
Install Flagger in the linkerd namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
|
||||
```
|
||||
|
||||
Note that you'll need kubectl 1.14 or newer to run the above command.
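If you're not sure which kubectl version you have, check the client version before running the command above:

```bash
kubectl version --client --short
```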
|
||||
|
||||
To enable Slack or MS Teams notifications,
|
||||
see Flagger's [install docs](https://docs.flagger.app/install/flagger-install-on-kubernetes) for Kustomize or Helm options.
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services and SMI traffic split).
|
||||
These objects expose the application inside the mesh and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace and enable Linkerd proxy injection:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
kubectl annotate namespace test linkerd.io/inject=enabled
|
||||
```
|
||||
|
||||
Install the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Create a canary custom resource for the podinfo deployment:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Linkerd Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 http://podinfo:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
ingresses.extensions/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods.
During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
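During the analysis you can also watch Flagger shift the weights by inspecting the generated SMI TrafficSplit; a minimal sketch, assuming the default naming where the split matches the target name:

```bash
# the split references podinfo-primary and podinfo-canary with their current weights
kubectl -n test get trafficsplit podinfo -o yaml
```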
|
||||
|
||||
### Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
|
||||
like HTTP requests success rate, requests average duration and pod health.
|
||||
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects (see the example after this list):
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps mounted as volumes or mapped to environment variables
|
||||
* Secrets mounted as volumes or mapped to environment variables
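For example, changing an environment variable on the PodSpec is enough to start a new analysis; the variable name below is purely illustrative:

```bash
# any PodSpec change, such as a new env var, triggers a canary analysis
kubectl -n test set env deployment/podinfo DEMO_TRIGGER=1
```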
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-06-30T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-06-30T16:15:07Z
|
||||
prod backend Failed 0 2019-06-30T17:05:07Z
|
||||
```
|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulty version.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Starting canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 0.5s
|
||||
Halt podinfo.test advancement request duration 1.45s > 0.5s
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
### Custom metrics
|
||||
|
||||
The canary analysis can be extended with Prometheus queries.
|
||||
|
||||
Let's define a check for not found errors. Edit the canary analysis and add the following metric:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
metrics:
|
||||
- name: "404s percentage"
|
||||
threshold: 3
|
||||
query: |
|
||||
100 - sum(
|
||||
rate(
|
||||
response_total{
|
||||
namespace="test",
|
||||
deployment="podinfo",
|
||||
status_code!="404",
|
||||
direction="inbound"
|
||||
}[1m]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
response_total{
|
||||
namespace="test",
|
||||
deployment="podinfo",
|
||||
direction="inbound"
|
||||
}[1m]
|
||||
)
|
||||
)
|
||||
* 100
|
||||
```
|
||||
|
||||
The above configuration validates the canary version by checking if the HTTP 404 req/sec percentage is below
|
||||
three percent of the total traffic. If the 404s rate reaches the 3% threshold, then the analysis is aborted and the
|
||||
canary is marked as failed.
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary:9898/status/404
|
||||
```
|
||||
|
||||
Watch Flagger logs:
|
||||
|
||||
```
|
||||
kubectl -n linkerd logs deployment/flagger -f | jq .msg
|
||||
|
||||
Starting canary deployment for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Halt podinfo.test advancement 404s percentage 6.20 > 3
|
||||
Halt podinfo.test advancement 404s percentage 6.45 > 3
|
||||
Halt podinfo.test advancement 404s percentage 7.22 > 3
|
||||
Halt podinfo.test advancement 404s percentage 6.50 > 3
|
||||
Halt podinfo.test advancement 404s percentage 6.34 > 3
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.
|
||||
|
||||
### Linkerd Ingress
|
||||
|
||||
There are two ingress controllers that are compatible with both Flagger and Linkerd: NGINX and Gloo.
|
||||
|
||||
Install NGINX:
|
||||
|
||||
```bash
|
||||
helm upgrade -i nginx-ingress stable/nginx-ingress \
|
||||
--namespace ingress-nginx
|
||||
```
|
||||
|
||||
Create an ingress definition for podinfo that rewrites the incoming header to the internal service name (required by Linkerd):
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/configuration-snippet: |
|
||||
proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:9898;
|
||||
proxy_hide_header l5d-remote-ip;
|
||||
proxy_hide_header l5d-server-id;
|
||||
spec:
|
||||
rules:
|
||||
- host: app.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: podinfo
|
||||
servicePort: 9898
|
||||
```
|
||||
|
||||
When using an ingress controller, the Linkerd traffic split does not apply to incoming traffic since NGINX is running outside of
the mesh. In order to run a canary analysis for a frontend app, Flagger creates a shadow ingress and sets the NGINX-specific annotations.
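Once an analysis is running you can inspect the shadow ingress that Flagger generates; the `podinfo-canary` name below is an assumption based on Flagger's `<name>-canary` naming convention:

```bash
# the generated ingress carries the NGINX canary annotations managed by Flagger
kubectl -n test get ingress podinfo-canary -o yaml
```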
|
||||
|
||||
### A/B Testing
|
||||
|
||||
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
|
||||
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
|
||||
This is particularly useful for frontend applications that require session affinity.
|
||||
|
||||

|
||||
|
||||
Edit the podinfo canary analysis: set the provider to `nginx`, add the ingress reference, remove the max/step weight and add the match conditions and iterations:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# ingress reference
|
||||
provider: nginx
|
||||
ingressRef:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
name: podinfo
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
canaryAnalysis:
|
||||
interval: 1m
|
||||
threshold: 10
|
||||
iterations: 10
|
||||
match:
|
||||
# curl -H 'X-Canary: always' http://app.example.com
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "always"
|
||||
# curl -b 'canary=always' http://app.example.com
|
||||
- headers:
|
||||
cookie:
|
||||
exact: "canary"
|
||||
# Linkerd Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
|
||||
those that call the service using the `X-Canary: always` header.
|
||||
|
||||
**Note** that the load test now targets the external address and uses the canary cookie.
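You can exercise the match conditions by hand while the analysis is running; requests carrying the cookie or header should reach the new version, plain requests should keep hitting the primary (app.example.com stands in for your own domain):

```bash
# routed to the canary via the header match
curl -H 'X-Canary: always' http://app.example.com

# routed to the canary via the cookie match
curl -b 'canary=always' http://app.example.com

# routed to the primary
curl http://app.example.com
```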
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B testing:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Events:
|
||||
Starting canary deployment for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
@@ -189,7 +189,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.1
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
@@ -54,7 +54,6 @@ spec:
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
@@ -86,7 +85,7 @@ spec:
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
@@ -109,6 +108,8 @@ horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
destinationrule.networking.istio.io/podinfo-canary
|
||||
destinationrule.networking.istio.io/podinfo-primary
|
||||
virtualservice.networking.istio.io/podinfo
|
||||
```
|
||||
|
||||
@@ -118,7 +119,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.1
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
BIN docs/screens/flagger-ms-teams-failed.png (new file, 22 KiB)
BIN docs/screens/flagger-ms-teams-notifications.png (new file, 30 KiB)
72
go.mod
Normal file
@@ -0,0 +1,72 @@
|
||||
module github.com/weaveworks/flagger
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.37.4 // indirect
|
||||
github.com/Masterminds/semver v1.4.2
|
||||
github.com/beorn7/perks v1.0.0 // indirect
|
||||
github.com/bxcodec/faker v2.0.1+incompatible // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.8.0 // indirect
|
||||
github.com/gogo/googleapis v1.2.0 // indirect
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/protobuf v1.3.1 // indirect
|
||||
github.com/golang/snappy v0.0.1 // indirect
|
||||
github.com/google/btree v1.0.0 // indirect
|
||||
github.com/google/go-cmp v0.3.0
|
||||
github.com/hashicorp/consul v1.4.4 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/serf v0.8.3 // indirect
|
||||
github.com/hashicorp/vault v1.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.7 // indirect
|
||||
github.com/k0kubun/pp v3.0.1+incompatible // indirect
|
||||
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 // indirect
|
||||
github.com/lyft/protoc-gen-validate v0.0.14 // indirect
|
||||
github.com/mattn/go-colorable v0.1.1 // indirect
|
||||
github.com/mattn/go-isatty v0.0.7 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/mitchellh/hashstructure v1.0.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
|
||||
github.com/prometheus/common v0.3.0 // indirect
|
||||
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 // indirect
|
||||
github.com/radovskyb/watcher v1.0.6 // indirect
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
github.com/solo-io/gloo v0.13.17
|
||||
github.com/solo-io/go-utils v0.7.11 // indirect
|
||||
github.com/solo-io/solo-kit v0.6.3
|
||||
github.com/solo-io/supergloo v0.3.11
|
||||
go.opencensus.io v0.20.2 // indirect
|
||||
go.uber.org/zap v1.9.1
|
||||
golang.org/x/crypto v0.0.0-20190418161225-b43e412143f9 // indirect
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
||||
gopkg.in/h2non/gock.v1 v1.0.14
|
||||
k8s.io/api v0.0.0-20190620073856-dcce3486da33
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed // indirect
|
||||
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
|
||||
k8s.io/client-go v11.0.0+incompatible
|
||||
k8s.io/code-generator v0.0.0-20190620073620-d55040311883
|
||||
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/google/uuid => github.com/google/uuid v1.0.0
|
||||
golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774
|
||||
golang.org/x/net => golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
|
||||
golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
|
||||
golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503
|
||||
golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9
|
||||
k8s.io/api => k8s.io/api v0.0.0-20190620073856-dcce3486da33
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
|
||||
k8s.io/client-go => k8s.io/client-go v0.0.0-20190620074045-585a16d2e773
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190620073620-d55040311883
|
||||
k8s.io/component-base => k8s.io/component-base v0.0.0-20190620074451-e5083e713460
|
||||
)
|
||||
|
||||
replace k8s.io/klog => github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423
|
||||
518
go.sum
Normal file
@@ -0,0 +1,518 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
|
||||
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
|
||||
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/Masterminds/sprig v2.18.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
|
||||
github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/avast/retry-go v2.2.0+incompatible h1:m+w7mVLWa/oKqX2xYqiEKQQkeGH8DDEXB/XnjS54Wyw=
|
||||
github.com/avast/retry-go v2.2.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA=
|
||||
github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM=
|
||||
github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
|
||||
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=
|
||||
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
|
||||
github.com/envoyproxy/go-control-plane v0.8.0 h1:uE6Fp4fOcAJdc1wTQXLJ+SYistkbG1dNoi6Zs1+Ybvk=
|
||||
github.com/envoyproxy/go-control-plane v0.8.0/go.mod h1:GSSbY9P1neVhdY7G4wu+IK1rk/dqhiCC/4ExuWJZVuk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.0.14 h1:YBW6/cKy9prEGRYLnaGa4IDhzxZhRCtKsax8srGKDnM=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc=
|
||||
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fgrosse/zaptest v1.1.0 h1:sK9hP0/xBoNX5qfFo3KWFluDXfc809APomI1QXuYELA=
|
||||
github.com/fgrosse/zaptest v1.1.0/go.mod h1:vMnRSul6kW7kIUXZgnZZcDwyTn8k49ODfAULL8nmL5w=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
|
||||
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
|
||||
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
|
||||
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
|
||||
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
|
||||
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
|
||||
github.com/go-openapi/validate v0.19.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
|
||||
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
|
||||
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
||||
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/goph/emperror v0.17.1/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
|
||||
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM=
|
||||
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/hashicorp/consul v1.4.4 h1:DR1+5EGgnPsd/LIsK3c9RDvajcsV5GOkGQBSNd3dpn8=
|
||||
github.com/hashicorp/consul v1.4.4/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k=
|
||||
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
|
||||
github.com/hashicorp/vault v1.1.0 h1:v79NUgO5xCZnXVzUkIqFOXtP8YhpnHAi1fk3eo9cuOE=
|
||||
github.com/hashicorp/vault v1.1.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
|
||||
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
|
||||
github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
|
||||
github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40=
|
||||
github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 h1:+eM/rkJK2iCSi0fDzp218TzJSAglSGeI985YYgRS/mY=
|
||||
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33/go.mod h1:n9QnL65Uv2gAG97S0t1q2aYmP33wPQ3oAh0+DJhQSSw=
|
||||
github.com/lyft/protoc-gen-validate v0.0.14 h1:xbdDVIHd0Xq5Bfzu+8JR9s7mFmJPMvNLmfGhgcHJdFU=
|
||||
github.com/lyft/protoc-gen-validate v0.0.14/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
|
||||
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4=
|
||||
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 h1:Raos9GP+3BlCBicScEQ+SjTLpYYac34fZMoeqj9McSM=
|
||||
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/radovskyb/watcher v1.0.6 h1:8WIQ9UxEYMZjem1OwU7dVH94DXXk9mAIE1i8eqHD+IY=
|
||||
github.com/radovskyb/watcher v1.0.6/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/solo-io/gloo v0.13.17 h1:rbNmO7e5+0vEq5krkO9/Rcp16PqqvepyGD0j3xPdmhg=
|
||||
github.com/solo-io/gloo v0.13.17/go.mod h1:dNnxchbq5F4ITJhX/0fy5brfbsj6vlW+AwgnTlqZN0A=
|
||||
github.com/solo-io/go-utils v0.7.11 h1:3Kmk50e6nYqyf7MBY1473XkH5L7qO/Nigjx+t6jEOQo=
|
||||
github.com/solo-io/go-utils v0.7.11/go.mod h1:7r+dFKdqJNOjx+odeLFqg8SOwVHyVVG1P0EPt6rNLN8=
|
||||
github.com/solo-io/solo-kit v0.6.3 h1:s/SxcgG7YSjW7wu7iQER5MCHSzeXg1b/lCZRazQ0IMw=
|
||||
github.com/solo-io/solo-kit v0.6.3/go.mod h1:oBaQ6tOwuO97u7w+s3TeI08YLHcbiWemInx0XkDfKFw=
|
||||
github.com/solo-io/supergloo v0.3.11 h1:IwnrL2xojowzb7k+V2wCG3I6WrelzXsezqJiraaVxIM=
|
||||
github.com/solo-io/supergloo v0.3.11/go.mod h1:hJuUwop5IMBL9Qc2/G+f+/PfIWPt/2nGr66fDcuhrn8=
|
||||
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
|
||||
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423 h1:qTtUiiNM+iq4IXOwHofKW5+jzvkvnNVz0GFRxwukUlY=
|
||||
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423/go.mod h1:TYstY5LQfzxFVm9MiiMg7kZ39sc5cue/6CFoY5KgXn8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI=
|
||||
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
|
||||
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFhxXGKWHMIRUI/T5x1GP90=
|
||||
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/AlecAivazis/survey.v1 v1.8.2/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
|
||||
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
|
||||
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
|
||||
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
|
||||
gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI=
|
||||
k8s.io/api v0.0.0-20190620073856-dcce3486da33 h1:aC/EvF9PT1h8NeMEOVwTel8xxbZwq0SZnxXNThEROnE=
|
||||
k8s.io/api v0.0.0-20190620073856-dcce3486da33/go.mod h1:ldk709UQo/iedNLOW7J06V9QSSGY5heETKeWqnPoqF8=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190111034747-7d26de67f177+incompatible/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
|
||||
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33 h1:Lkd+QNFOB3DqrDyWo796aodJgFJautn/M+t9IGearPc=
|
||||
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33/go.mod h1:9q5NW/mMno/nwbRZd/Ks2TECgi2PTZ9cwarf4q+ze6Q=
|
||||
k8s.io/apiserver v0.0.0-20190111033246-d50e9ac5404f+incompatible/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
|
||||
k8s.io/cli-runtime v0.0.0-20190111035321-c7263d800665+incompatible/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
|
||||
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773 h1:XyjDnwRO9icfyrN7HRSa8o3NqdPOEQoVW8vWizuqyQQ=
|
||||
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773/go.mod h1:miKCC7C/WGwJqcDctyJtAnP3Gss0Y5KwURqJ7q5pfEw=
|
||||
k8s.io/code-generator v0.0.0-20190620073620-d55040311883 h1:NWWNvN6IdpmQvZ43rVccCI8GPUrheK8XNdqeKycw0DI=
|
||||
k8s.io/code-generator v0.0.0-20190620073620-d55040311883/go.mod h1:+a+9g9W0llgbgvx6qOb+VbeZPH5km1FrVyMQe9/jkQY=
|
||||
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/helm v2.13.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
|
||||
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 h1:hlzz2EvLPcefAcG/j0tOZpds4LWSElZzxpZuhxbblbc=
|
||||
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666/go.mod h1:jqYp7BKXW0Jl+F1dWXBieUmcHKMPpGHGWA0uqfpOZZ4=
|
||||
k8s.io/kubernetes v1.13.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
|
||||
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
|
||||
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20181214233322-d43a45b8663b/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
|
||||
@@ -1,27 +1,23 @@
#!/usr/bin/env bash

# Copyright 2018 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
REPO_ROOT=$(git rev-parse --show-toplevel)
CODEGEN_VERSION="@v0.0.0-20190620073620-d55040311883"
CODEGEN_PKG=${CODEGEN_PKG:-$(echo `go env GOPATH`"/pkg/mod/k8s.io/code-generator${CODEGEN_VERSION}")}

${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
if [[ "$CIRCLECI" ]]; then
ls $(echo `go env GOPATH`'/pkg/mod/k8s.io/');
mkdir -p ${REPO_ROOT}/bin;
cp -r ${CODEGEN_PKG} ${REPO_ROOT}/bin/;
CODEGEN_PKG=${REPO_ROOT}/bin/code-generator${CODEGEN_VERSION};
echo ">> $CODEGEN_PKG";
fi

chmod +x ${CODEGEN_PKG}/generate-groups.sh

${CODEGEN_PKG}/generate-groups.sh all \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
--go-header-file ${REPO_ROOT}/hack/boilerplate.go.txt
145
kustomize/README.md
Normal file
@@ -0,0 +1,145 @@
# Flagger Kustomize installer

As an alternative to Helm, Flagger can be installed with [Kustomize](https://kustomize.io/).

## Service mesh specific installers

Install Flagger for Istio:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
```

This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to Istio's Prometheus instance.

Note that you'll need kubectl 1.14 to run the above command, or you can download the
[kustomize binary](https://github.com/kubernetes-sigs/kustomize/releases) and run:

```bash
kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
```
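
As a quick sanity check after either command, you can wait for the rollout to finish (a minimal verification sketch; it assumes the default `flagger` deployment name used by the kustomize base):

```bash
# wait until the Flagger controller is up, then list it
kubectl -n istio-system rollout status deployment/flagger
kubectl -n istio-system get deployment/flagger
```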
|
||||
|
||||
Install Flagger for Linkerd:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
|
||||
```
|
||||
|
||||
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
|
||||
|
||||
If you want to install a specific Flagger release, add the version number to the URL:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
|
||||
```
|
||||
|

## Generic installer

Install Flagger and Prometheus:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/kubernetes
```

This deploys Flagger and Prometheus in the `flagger-system` namespace,
sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` and the mesh provider to `kubernetes`.
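
To inspect that Prometheus instance directly, you can port-forward to it and browse `http://localhost:9090`; a minimal sketch, assuming the service name implied by the metrics URL above:

```bash
kubectl -n flagger-system port-forward svc/flagger-prometheus 9090:9090
```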

To target a different provider you can specify it in the canary custom resource:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: app
  namespace: test
spec:
  # can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
  # use the kubernetes provider for Blue/Green style deployments
  provider: nginx
```

You'll need Prometheus when using Flagger with AWS App Mesh, Gloo or the NGINX ingress controller.
The Prometheus instance has a data retention of two hours and is configured to scrape all pods in your cluster that
have the `prometheus.io/scrape: "true"` annotation.
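
As an illustration, an application can opt into scraping by adding that annotation to its pod template; a minimal sketch using `kubectl patch`, where the `podinfo` deployment and the `test` namespace are placeholders:

```bash
kubectl -n test patch deployment podinfo --patch '
spec:
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
'
```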

## Configure Slack notifications

Create a kustomization file that uses the Flagger base:

```bash
cat > kustomization.yaml <<EOF
namespace: istio-system
bases:
  - github.com/weaveworks/flagger/kustomize/base/flagger
patchesStrategicMerge:
  - patch.yaml
EOF
```

Create a patch and enable Slack notifications by setting the Slack channel and hook URL:

```bash
cat > patch.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -mesh-provider=istio
            - -metrics-server=http://prometheus.istio-system:9090
            - -slack-user=flagger
            - -slack-channel=alerts
            - -slack-url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
EOF
```

Install Flagger for Istio with Slack notifications:

```bash
kubectl apply -k .
```
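
To double-check that the patch landed, you can print the container arguments of the patched deployment; this assumes the `istio-system` namespace from the kustomization above:

```bash
kubectl -n istio-system get deployment flagger \
  -o jsonpath='{.spec.template.spec.containers[0].args}'
```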

## Configure MS Teams notifications

Create a kustomization file that uses the Flagger base:

```bash
cat > kustomization.yaml <<EOF
namespace: linkerd
bases:
  - github.com/weaveworks/flagger/kustomize/base/flagger
patchesStrategicMerge:
  - patch.yaml
EOF
```

Create a patch and set the MS Teams webhook URL:

```bash
cat > patch.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -mesh-provider=linkerd
            - -metrics-server=http://linkerd-prometheus:9090
            - -msteams-url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
EOF
```

Install Flagger for Linkerd with MS Teams notifications:

```bash
kubectl apply -k .
```

6  kustomize/base/flagger/account.yaml  Normal file  @@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flagger
  namespace: flagger-system

225  kustomize/base/flagger/crd.yaml  Normal file  @@ -0,0 +1,225 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: canaries.flagger.app
  annotations:
    helm.sh/resource-policy: keep
spec:
  group: flagger.app
  version: v1alpha3
  versions:
    - name: v1alpha3
      served: true
      storage: true
    - name: v1alpha2
      served: true
      storage: false
    - name: v1alpha1
      served: true
      storage: false
  names:
    plural: canaries
    singular: canary
    kind: Canary
    categories:
      - all
  scope: Namespaced
  subresources:
    status: {}
  additionalPrinterColumns:
    - name: Status
      type: string
      JSONPath: .status.phase
    - name: Weight
      type: string
      JSONPath: .status.canaryWeight
    - name: LastTransitionTime
      type: string
      JSONPath: .status.lastTransitionTime
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required:
            - targetRef
            - service
            - canaryAnalysis
          properties:
            provider:
              type: string
            progressDeadlineSeconds:
              type: number
            targetRef:
              type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            autoscalerRef:
              anyOf:
                - type: string
                - type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            ingressRef:
              anyOf:
                - type: string
                - type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            service:
              type: object
              required: ['port']
              properties:
                port:
                  type: number
                portName:
                  type: string
                portDiscovery:
                  type: boolean
                meshName:
                  type: string
                timeout:
                  type: string
            skipAnalysis:
              type: boolean
            canaryAnalysis:
              properties:
                interval:
                  description: Canary schedule interval
                  type: string
                  pattern: "^[0-9]+(m|s)"
                iterations:
                  description: Number of checks to run for A/B Testing and Blue/Green
                  type: number
                threshold:
                  description: Max number of failed checks before rollback
                  type: number
                maxWeight:
                  description: Max traffic percentage routed to canary
                  type: number
                stepWeight:
                  description: Canary incremental traffic percentage step
                  type: number
                metrics:
                  description: Prometheus query list for this canary
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'threshold']
                      properties:
                        name:
                          description: Name of the Prometheus metric
                          type: string
                        interval:
                          description: Interval of the promql query
                          type: string
                          pattern: "^[0-9]+(m|s)"
                        threshold:
                          description: Max scalar value accepted for this metric
                          type: number
                        query:
                          description: Prometheus query
                          type: string
                webhooks:
                  description: Webhook list for this canary
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'url', 'timeout']
                      properties:
                        name:
                          description: Name of the webhook
                          type: string
                        type:
                          description: Type of the webhook pre, post or during rollout
                          type: string
                          enum:
                            - ""
                            - pre-rollout
                            - rollout
                            - post-rollout
                        url:
                          description: URL address of this webhook
                          type: string
                          format: url
                        timeout:
                          description: Request timeout for this webhook
                          type: string
                          pattern: "^[0-9]+(m|s)"
        status:
          properties:
            phase:
              description: Analysis phase of this canary
              type: string
              enum:
                - ""
                - Initializing
                - Initialized
                - Waiting
                - Progressing
                - Finalising
                - Succeeded
                - Failed
            canaryWeight:
              description: Traffic weight percentage routed to canary
              type: number
            failedChecks:
              description: Failed check count of the current canary analysis
              type: number
            iterations:
              description: Iteration count of the current canary analysis
              type: number
            lastAppliedSpec:
              description: LastAppliedSpec of this canary
              type: string
            lastTransitionTime:
              description: LastTransitionTime of this canary
              format: date-time
              type: string
            conditions:
              description: Status conditions of this canary
              type: array
              properties:
                items:
                  type: object
                  required: ['type', 'status', 'reason']
                  properties:
                    lastTransitionTime:
                      description: LastTransitionTime of this condition
                      format: date-time
                      type: string
                    lastUpdateTime:
                      description: LastUpdateTime of this condition
                      format: date-time
                      type: string
                    message:
                      description: Message associated with this condition
                      type: string
                    reason:
                      description: Reason for the current status of this condition
                      type: string
                    status:
                      description: Status of this condition
                      type: string
                    type:
                      description: Type of this condition
                      type: string
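
For reference, here is a minimal Canary object that satisfies the schema's required fields (`targetRef`, `service`, `canaryAnalysis`); the target name, the port and the `request-success-rate` metric are illustrative assumptions, not part of this diff:

```bash
cat > canary.yaml <<EOF
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: app
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app
  service:
    port: 9898
  canaryAnalysis:
    interval: 1m
    threshold: 5
    maxWeight: 50
    stepWeight: 10
    metrics:
      - name: request-success-rate
        threshold: 99
        interval: 1m
EOF
kubectl apply -f canary.yaml
```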

56  kustomize/base/flagger/deployment.yaml  Normal file  @@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: flagger
  template:
    metadata:
      labels:
        app: flagger
      annotations:
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: flagger
      containers:
        - name: flagger
          image: weaveworks/flagger:0.16.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=2
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=2
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            limits:
              memory: "512Mi"
              cpu: "1000m"
            requests:
              memory: "32Mi"
              cpu: "10m"
          securityContext:
            readOnlyRootFilesystem: true
            runAsUser: 10001

11  kustomize/base/flagger/kustomization.yaml  Normal file  @@ -0,0 +1,11 @@
namespace: flagger-system
commonLabels:
  app: flagger
resources:
  - account.yaml
  - rbac.yaml
  - crd.yaml
  - deployment.yaml
images:
  - name: weaveworks/flagger
    newTag: 0.18.0

90  kustomize/base/flagger/rbac.yaml  Normal file  @@ -0,0 +1,90 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: flagger
rules:
  - apiGroups:
      - ""
    resources:
      - events
      - configmaps
      - secrets
      - services
    verbs: ["*"]
  - apiGroups:
      - apps
    resources:
      - deployments
    verbs: ["*"]
  - apiGroups:
      - autoscaling
    resources:
      - horizontalpodautoscalers
    verbs: ["*"]
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
      - ingresses/status
    verbs: ["*"]
  - apiGroups:
      - flagger.app
    resources:
      - canaries
      - canaries/status
    verbs: ["*"]
  - apiGroups:
      - networking.istio.io
    resources:
      - virtualservices
      - virtualservices/status
      - destinationrules
      - destinationrules/status
    verbs: ["*"]
  - apiGroups:
      - appmesh.k8s.aws
    resources:
      - meshes
      - meshes/status
      - virtualnodes
      - virtualnodes/status
      - virtualservices
      - virtualservices/status
    verbs: ["*"]
  - apiGroups:
      - split.smi-spec.io
    resources:
      - trafficsplits
    verbs: ["*"]
  - apiGroups:
      - gloo.solo.io
    resources:
      - settings
      - upstreams
      - upstreamgroups
      - proxies
      - virtualservices
    verbs: ["*"]
  - apiGroups:
      - gateway.solo.io
    resources:
      - virtualservices
      - gateways
    verbs: ["*"]
  - nonResourceURLs:
      - /version
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: flagger
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flagger
subjects:
  - kind: ServiceAccount
    name: flagger
    namespace: flagger-system

5  kustomize/base/prometheus/account.yaml  Normal file  @@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flagger-prometheus
  namespace: flagger-system