Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-17 19:40:06 +00:00.

## Compare commits (149 commits)
Commit SHAs, newest first:

```
1902884b56  98d2805267  24a74d3589  15463456ec  752eceed4b
eadce34d6f  11ccf34bbc  e308678ed5  cbe72f0aa2  bc84e1c154
344bd45a0e  72014f736f  0a2949b6ad  2ff695ecfe  8d0b54e059
121a65fad0  ecaa203091  6d0e3c6468  c933476fff  1335210cf5
9d12794600  d57fc7d03e  1f9f6fb55a  948df55de3  8914f26754
79b3370892  a233b99f0b  0d94c01678  00151e92fe  f7db0210ea
cf3ba35fb9  177dc824e3  5f544b90d6  921ac00383  7df7218978
e4c6903a01  027342dc72  e17a747785  e477b37bd0  ad25068375
c92230c109  9e082d9ee3  cfd610ac55  82067f13bf  242d79e49d
4f01ecde5a  61141c7479  62429ff710  82a1f45cc1  1a95fc2a9c
13816eeafa  5279f73c17  d196bb2856  3f8f634a1b  5ba27c898e
57f1b63fa1  d69e203479  4d7fae39a8  2dc554c92a  21c394ef7f
2173bfc1a0  a19d016e14  8f1b5df9e2  2d6b8ecfdf  8093612011
39dc761e32  0c68983c62  c7539f6e4b  8cebc0acee  f60c4d60cf
662f9cba2e  4a82e1e223  b60b912bf8  093348bc60  37ebbf14f9
156488c8d5  68d1f583cc  3492b07d9a  d0b582048f  a82eb7b01f
cd08afcbeb  331942a4ed  aa24d6ff7e  58c2c19f1e  2a91149211
868482c240  4e387fa943  15484363d6  50b7b74480  adb53c63dd
bdc3a32e96  65f716182b  6ef72e2550  60f51ad7d5  a09dc2cbd8
825d07aa54  f46882c778  663fa08cc1  19e625d38e  edcff9cd15
e0fc5ecb39  4ac6629969  68d8dad7c8  4ab9ceafc1  352ed898d4
e091d6a50d  c651ef00c9  4b17788a77  e5612bca50  d21fb1afe8
89d0a533e2  db673dddd9  88ad457e87  126b68559e  2cd3fe47e6
15eb7cce55  13f923aabf  0ffb112063  b4ea6af110  611c8f7374
1cc73f37e7  ca37fc0eb5  5380624da9  aaece0bd44  de7cc17f5d
66efa39d27  ff7c0a105d  7b29253df4  7ef63b341e  e7bfaa4f1a
3a9a408941  3e43963daa  69a6e260f5  664e7ad555  ee4a009a06
36dfd4dd35  dbf36082b2  3a1018cff6  fc10745a1a  347cfd06de
ec759ce467  f211e0fe31  c91a128b65  6a080f3032  b2c12c1131
b945b37089  9a5529a0aa  025785389d  48d9a0dede
```
.circleci/config.yml

```diff
@@ -1,6 +1,6 @@
 version: 2.1
 jobs:
-  e2e-testing:
+  e2e-istio-testing:
     machine: true
     steps:
       - checkout
@@ -9,14 +9,46 @@ jobs:
       - run: test/e2e-build.sh
       - run: test/e2e-tests.sh

+  e2e-supergloo-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-supergloo.sh
+      - run: test/e2e-build.sh supergloo:test.supergloo-system
+      - run: test/e2e-tests.sh canary
+
+  e2e-nginx-testing:
+    machine: true
+    steps:
+      - checkout
+      - run: test/e2e-kind.sh
+      - run: test/e2e-nginx.sh
+      - run: test/e2e-nginx-build.sh
+      - run: test/e2e-nginx-tests.sh
+
 workflows:
   version: 2
   build-and-test:
     jobs:
-      - e2e-testing:
+      - e2e-istio-testing:
          filters:
            branches:
              ignore:
-                - gh-pages
+                - /gh-pages.*/
                - /docs-.*/
                - /release-.*/
+      - e2e-supergloo-testing:
+          filters:
+            branches:
+              ignore:
+                - /gh-pages.*/
+                - /docs-.*/
+                - /release-.*/
+      - e2e-nginx-testing:
+          filters:
+            branches:
+              ignore:
+                - /gh-pages.*/
+                - /docs-.*/
+                - /release-.*/
```
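The new jobs run plain shell scripts, so the same flow can be tried outside CI. A minimal sketch of reproducing the NGINX job on a Docker-capable Linux host — the script names come from the config above; what each script does is inferred from its name, and prerequisites (Go, Docker, kind) are assumptions:

```sh
git clone https://github.com/weaveworks/flagger.git && cd flagger
./test/e2e-kind.sh          # create a local Kubernetes cluster with kind (assumed)
./test/e2e-nginx.sh         # install the NGINX ingress controller (assumed)
./test/e2e-nginx-build.sh   # build the Flagger image and load it into the cluster (assumed)
./test/e2e-nginx-tests.sh   # drive a canary release end to end (assumed)
```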
.travis.yml (23 changed lines)

```diff
@@ -1,6 +1,11 @@
 sudo: required
 language: go

+branches:
+  except:
+    - /gh-pages.*/
+    - /docs-.*/
+
 go:
   - 1.12.x

@@ -12,13 +17,7 @@ addons:
   packages:
     - docker-ce

-#before_script:
-#  - go get -u sigs.k8s.io/kind
-#  - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-#  - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
-
 script:
   - set -e
   - make test-fmt
   - make test-codegen
   - go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
@@ -29,16 +28,16 @@ after_success:
       echo "PR build, skipping image push";
     else
       BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
-      docker tag weaveworks/flagger:latest quay.io/weaveworks/flagger:${BRANCH_COMMIT};
-      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
-      docker push quay.io/weaveworks/flagger:${BRANCH_COMMIT};
+      docker tag weaveworks/flagger:latest weaveworks/flagger:${BRANCH_COMMIT};
+      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
+      docker push weaveworks/flagger:${BRANCH_COMMIT};
     fi
   - if [ -z "$TRAVIS_TAG" ]; then
       echo "Not a release, skipping image push";
     else
-      docker tag weaveworks/flagger:latest quay.io/weaveworks/flagger:${TRAVIS_TAG};
-      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
-      docker push quay.io/weaveworks/flagger:$TRAVIS_TAG;
+      docker tag weaveworks/flagger:latest weaveworks/flagger:${TRAVIS_TAG};
+      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
+      docker push weaveworks/flagger:$TRAVIS_TAG;
     fi
   - bash <(curl -s https://codecov.io/bash)
   - rm coverage.txt
```
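Branch pushes are tagged `<branch>-<7-char sha>`. A quick illustration of the `BRANCH_COMMIT` expression above, using hypothetical values:

```sh
TRAVIS_BRANCH=master
TRAVIS_COMMIT=48d9a0dede3f27ee10ca79d4a480d900f2ae07ce   # hypothetical full SHA
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7)
echo ${BRANCH_COMMIT}   # prints: master-48d9a0d
```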
CHANGELOG.md (66 changed lines)

```diff
@@ -2,6 +2,72 @@

 All notable changes to this project are documented in this file.

+## 0.13.2 (2019-04-11)
+
+Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)
+
+#### Fixes
+
+- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)
+
+#### Improvements
+
+- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)
+
+## 0.13.1 (2019-04-09)
+
+Fixes for custom metrics checks and NGINX Prometheus queries
+
+#### Fixes
+
+- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)
+
+## 0.13.0 (2019-04-08)
+
+Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delivery) ingress controller
+
+#### Features
+
+- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/weaveworks/flagger/pull/170)
+- Add Prometheus add-on to Flagger Helm chart for App Mesh and NGINX [79b3370](https://github.com/weaveworks/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)
+
+#### Fixes
+
+- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/weaveworks/flagger/pull/162)
+
+## 0.12.0 (2019-04-29)
+
+Adds support for [SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
+
+#### Features
+
+- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/weaveworks/flagger/pull/151)
+
+## 0.11.1 (2019-04-18)
+
+Move Flagger and the load tester container images to Docker Hub
+
+#### Features
+
+- Add Bash Automated Testing System support to Flagger tester for running acceptance tests as pre-rollout hooks
+
+## 0.11.0 (2019-04-17)
+
+Adds pre/post rollout [webhooks](https://docs.flagger.app/how-it-works#webhooks)
+
+#### Features
+
+- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/weaveworks/flagger/pull/147)
+
+#### Improvements
+
+- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/weaveworks/flagger/pull/146)
+- Make the pod selector label configurable [#148](https://github.com/weaveworks/flagger/pull/148)
+
+#### Breaking changes
+
+- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/weaveworks/flagger/pull/141)
+
 ## 0.10.0 (2019-03-27)

 Adds support for App Mesh
```
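The 0.11.0 entry introduces `pre-rollout` and `post-rollout` webhook types. A minimal sketch of how such hooks could be declared in a canary analysis — only the `type` values come from the changelog; the hook names, URL and timeouts are illustrative assumptions:

```sh
cat <<'EOF' >> podinfo-canary.yaml
    webhooks:
      - name: acceptance-test       # hypothetical hook name
        type: pre-rollout           # gate: runs before any traffic is shifted
        url: http://flagger-loadtester.test/   # hypothetical endpoint
        timeout: 30s
      - name: load-test
        type: post-rollout          # runs after promotion or rollback
        url: http://flagger-loadtester.test/
        timeout: 5s
EOF
```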
Dockerfile.loadtester

```diff
@@ -1,20 +1,4 @@
-FROM golang:1.12 AS hey-builder
-
-RUN mkdir -p /go/src/github.com/rakyll/hey/
-
-WORKDIR /go/src/github.com/rakyll/hey
-
-ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .
-
-RUN tar xzf v0.1.1.tar.gz --strip 1
-
-RUN go get ./...
-
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
-    go install -ldflags '-w -extldflags "-static"' \
-    /go/src/github.com/rakyll/hey
-
-FROM golang:1.11 AS builder
+FROM golang:1.12 AS builder

 RUN mkdir -p /go/src/github.com/weaveworks/flagger/

@@ -26,15 +10,17 @@ RUN go test -race ./pkg/loadtester/

 RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*

-FROM alpine:3.9
+FROM bats/bats:v1.1.0

 RUN addgroup -S app \
     && adduser -S -g app app \
-    && apk --no-cache add ca-certificates curl
+    && apk --no-cache add ca-certificates curl jq

 WORKDIR /home/app

-COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
+RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
+    chmod +x hey && mv hey /usr/local/bin/hey

 COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester .

 RUN chown -R app:app ./
```
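The rebuilt image drops the hey build stage in favor of a prebuilt binary, switches the base to bats for acceptance testing, and adds jq for JSON parsing in test scripts. A hypothetical smoke check that all three tools are on the image (the tag is illustrative):

```sh
docker run --rm --entrypoint sh weaveworks/flagger-loadtester:latest -c \
  'hey 2>&1 | head -n1; bats --version; jq --version'
```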
Gopkg.lock (609 changed lines, generated)

```diff
@@ -6,16 +6,32 @@
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
   pruneopts = "NUT"
-  revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
-  version = "v0.36.0"
+  revision = "fcb9a2d5f791d07be64506ab54434de65989d370"
+  version = "v0.37.4"

+[[projects]]
+  branch = "master"
+  digest = "1:f12358576cd79bba0ae626530d23cde63416744f486c8bc817802c6907eaadd7"
+  name = "github.com/armon/go-metrics"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "f0300d1749da6fa982027e449ec0c7a145510c3c"
+
+[[projects]]
+  digest = "1:13d5750ba049ce46bf931792803f1d5584b04026df9badea5931e33c22aa34ee"
+  name = "github.com/avast/retry-go"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "08d411bf8302219fe47ca04dbdf9de892010c5e5"
+  version = "v2.2.0"
+
 [[projects]]
   digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
   pruneopts = "NUT"
-  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+  revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46"
+  version = "v1.0.0"

 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@@ -25,6 +41,14 @@
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"

+[[projects]]
+  digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
+  name = "github.com/evanphx/json-patch"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
+  version = "v4.1.0"
+
 [[projects]]
   digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
   name = "github.com/ghodss/yaml"
@@ -34,25 +58,20 @@
   version = "v1.0.0"

 [[projects]]
-  digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
+  digest = "1:895d2773c9e78e595dd5f946a25383d579d3094a9d8d9306dba27359f190f275"
   name = "github.com/gogo/protobuf"
   packages = [
     "gogoproto",
+    "jsonpb",
     "proto",
+    "protoc-gen-gogo/descriptor",
     "sortkeys",
+    "types",
   ]
   pruneopts = "NUT"
   revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
   version = "v1.2.1"

-[[projects]]
-  branch = "master"
-  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
-  name = "github.com/golang/glog"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
-  source = "github.com/istio/glog"
-
 [[projects]]
   branch = "master"
   digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
@@ -62,26 +81,35 @@
   revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"

 [[projects]]
-  digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
+  digest = "1:a98a0b00720dc3149bf3d0c8d5726188899e5bab2f5072b9a7ef82958fbc98b2"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
+    "protoc-gen-go/descriptor",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
     "ptypes/timestamp",
   ]
   pruneopts = "NUT"
-  revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f"
-  version = "v1.3.0"
+  revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+  version = "v1.3.1"

+[[projects]]
+  digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
+  name = "github.com/golang/snappy"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
+  version = "v0.0.1"
+
 [[projects]]
-  branch = "master"
   digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
   name = "github.com/google/btree"
   packages = ["."]
   pruneopts = "NUT"
   revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
+  version = "v1.0.0"

 [[projects]]
   digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
@@ -98,12 +126,12 @@
   version = "v0.2.0"

 [[projects]]
-  branch = "master"
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
+  revision = "f140a6486e521aad38f5917de355cbf147cc0496"
+  version = "v1.0.0"

 [[projects]]
   digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
@@ -136,6 +164,70 @@
   revision = "b4df798d65426f8c8ab5ca5f9987aec5575d26c9"
   version = "v2.0.1"

+[[projects]]
+  digest = "1:adf097b949dbc1e452fbad15322c78651f6e7accb4661dffa38fed30273c5966"
+  name = "github.com/hashicorp/consul"
+  packages = ["api"]
+  pruneopts = "NUT"
+  revision = "ea5210a30e154f4da9a4c8e729b45b8ce7b9b92c"
+  version = "v1.4.4"
+
+[[projects]]
+  digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19"
+  name = "github.com/hashicorp/errwrap"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:fff05cb0c34d2decaeb27bb6ab6b73a6947c3009d725160070da55f9511fd410"
+  name = "github.com/hashicorp/go-cleanhttp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744"
+  version = "v0.5.1"
+
+[[projects]]
+  digest = "1:1cf16b098a70d6c02899608abbb567296d11c7b830635014dfe6124a02dc1369"
+  name = "github.com/hashicorp/go-immutable-radix"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443"
+  name = "github.com/hashicorp/go-multierror"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:b3496707ba69dd873a870238644aa8ac259ee67fc4fd05caf37b608e7053e1f7"
+  name = "github.com/hashicorp/go-retryablehttp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "357460732517ec3b57c05c51443296bdd6df1874"
+  version = "v0.5.3"
+
+[[projects]]
+  digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423"
+  name = "github.com/hashicorp/go-rootcerts"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "63503fb4e1eca22f9ae0f90b49c5d5538a0e87eb"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:6c69626c7aacae1e573084cdb6ed55713094ba56263f687e5d1750053bd08598"
+  name = "github.com/hashicorp/go-sockaddr"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c7188e74f6acae5a989bdc959aa779f8b9f42faf"
+  version = "v1.0.2"
+
 [[projects]]
   digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
   name = "github.com/hashicorp/golang-lru"
@@ -147,6 +239,48 @@
   revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
   version = "v0.5.1"

+[[projects]]
+  digest = "1:39f543569bf189e228c84a294c50aca8ea56c82b3d9df5c9b788249907d7049a"
+  name = "github.com/hashicorp/hcl"
+  packages = [
+    ".",
+    "hcl/ast",
+    "hcl/parser",
+    "hcl/scanner",
+    "hcl/strconv",
+    "hcl/token",
+    "json/parser",
+    "json/scanner",
+    "json/token",
+  ]
+  pruneopts = "NUT"
+  revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:acc81e4e4289587b257ccdfccbc6eaf16d4c2fb57dda73c6bb349bf50f02501f"
+  name = "github.com/hashicorp/serf"
+  packages = ["coordinate"]
+  pruneopts = "NUT"
+  revision = "15cfd05de3dffb3664aa37b06e91f970b825e380"
+  version = "v0.8.3"
+
+[[projects]]
+  digest = "1:cded54cacfb6fdc86b916031e4113cbc50dfb55e92535651733604f1e3a8ce59"
+  name = "github.com/hashicorp/vault"
+  packages = [
+    "api",
+    "helper/compressutil",
+    "helper/consts",
+    "helper/hclutil",
+    "helper/jsonutil",
+    "helper/parseutil",
+    "helper/strutil",
+  ]
+  pruneopts = "NUT"
+  revision = "36aa8c8dd1936e10ebd7a4c1d412ae0e6f7900bd"
+  version = "v1.1.0"
+
 [[projects]]
   digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
   name = "github.com/imdario/mergo"
@@ -156,19 +290,55 @@
   version = "v0.3.7"

+[[projects]]
+  branch = "master"
+  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
+  name = "github.com/istio/glog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
+
 [[projects]]
-  digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
+  digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9"
   name = "github.com/json-iterator/go"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
+  revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
+  version = "v1.1.6"

+[[projects]]
+  digest = "1:2760a8fe9b7bcc95c397bc85b69bc7a11eed03c644b45e8c00c581c114486d3f"
+  name = "github.com/k0kubun/pp"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3d73dea227e0711e38b911ffa6fbafc8ff6b2991"
+  version = "v3.0.1"
+
+[[projects]]
+  digest = "1:493282a1185f77368678d3886b7e999e37e920d22f69669545f1ee5ae10743a2"
+  name = "github.com/linkerd/linkerd2"
+  packages = [
+    "controller/gen/apis/serviceprofile",
+    "controller/gen/apis/serviceprofile/v1alpha1",
+  ]
+  pruneopts = "NUT"
+  revision = "5e47cb150a33150e5aeddc6672d8a64701a970de"
+  version = "stable-2.2.1"
+
+[[projects]]
+  digest = "1:0dbba7d4d4f3eeb01acd81af338ff4a3c4b0bb814d87368ea536e616f383240d"
+  name = "github.com/lyft/protoc-gen-validate"
+  packages = ["validate"]
+  pruneopts = "NUT"
+  revision = "ff6f7a9bc2e5fe006509b9f8c7594c41a953d50f"
+  version = "v0.0.14"
+
+[[projects]]
+  digest = "1:9785a54031460a402fab4e4bbb3124c8dd9e9f7b1982109fef605cb91632d480"
+  name = "github.com/mattn/go-colorable"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3a70a971f94a22f2fa562ffcc7a0eb45f5daf045"
+  version = "v0.1.1"
+
+[[projects]]
+  digest = "1:85edcc76fa95b8b312642905b56284f4fe5c42d8becb219481adba7e97d4f5c5"
+  name = "github.com/mattn/go-isatty"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c2a7a6ca930a4cd0bc33a3f298eb71960732a3a7"
+  version = "v0.0.7"
+
 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -178,6 +348,30 @@
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"

+[[projects]]
+  digest = "1:f9f72e583aaacf1d1ac5d6121abd4afd3c690baa9e14e1d009df26bf831ba347"
+  name = "github.com/mitchellh/go-homedir"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:e34decedbcec12332c5836d16a6838f864e0b43c5b4f9aa9d9a85101015f87c2"
+  name = "github.com/mitchellh/hashstructure"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "a38c50148365edc8df43c1580c48fb2b3a1e9cd7"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
+  name = "github.com/mitchellh/mapstructure"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
+  version = "v1.1.2"
+
 [[projects]]
   digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
@@ -210,6 +404,25 @@
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"

+[[projects]]
+  digest = "1:122724025b9505074138089f78f543f643ae3a8fab6d5b9edf72cce4dd49cc91"
+  name = "github.com/pierrec/lz4"
+  packages = [
+    ".",
+    "internal/xxh32",
+  ]
+  pruneopts = "NUT"
+  revision = "315a67e90e415bcdaff33057da191569bf4d8479"
+  version = "v2.1.1"
+
+[[projects]]
+  digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+  version = "v0.8.1"
+
 [[projects]]
   digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
   name = "github.com/prometheus/client_golang"
@@ -238,22 +451,118 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
-  version = "v0.2.0"
+  revision = "a82f4c12f983cc2649298185f296632953e50d3e"
+  version = "v0.3.0"

 [[projects]]
   branch = "master"
-  digest = "1:0a2e604afa3cbf53a1ddade2f240ee8472eded98856dd8c7cfbfea392ddbbfc7"
+  digest = "1:7813f698f171bd7132b123364433e1b0362f7fdb4ed7f4a20df595a4c2410f8a"
   name = "github.com/prometheus/procfs"
-  packages = [
-    ".",
-    "internal/util",
-    "iostats",
-    "nfs",
-    "xfs",
-  ]
+  packages = ["."]
   pruneopts = "NUT"
-  revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02"
+  revision = "8368d24ba045f26503eb745b624d930cbe214c79"

+[[projects]]
+  digest = "1:38969f56c08bdf302a73a7c8adb0520dc9cb4cd54206cbe7c8a147da52cc0890"
+  name = "github.com/radovskyb/watcher"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "d8b41ca2397a9b5cfc26adb10edbbcde40187a87"
+  version = "v1.0.6"
+
+[[projects]]
+  digest = "1:09d61699d553a4e6ec998ad29816177b1f3d3ed0c18fe923d2c174ec065c99c8"
+  name = "github.com/ryanuber/go-glob"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:cad7db5ed31bef1f9e4429ad0927b40dbf31535167ec4c768fd5985565111ea5"
+  name = "github.com/solo-io/gloo"
+  packages = [
+    "projects/gloo/pkg/api/v1",
+    "projects/gloo/pkg/api/v1/plugins",
+    "projects/gloo/pkg/api/v1/plugins/aws",
+    "projects/gloo/pkg/api/v1/plugins/azure",
+    "projects/gloo/pkg/api/v1/plugins/consul",
+    "projects/gloo/pkg/api/v1/plugins/faultinjection",
+    "projects/gloo/pkg/api/v1/plugins/grpc",
+    "projects/gloo/pkg/api/v1/plugins/grpc_web",
+    "projects/gloo/pkg/api/v1/plugins/hcm",
+    "projects/gloo/pkg/api/v1/plugins/kubernetes",
+    "projects/gloo/pkg/api/v1/plugins/rest",
+    "projects/gloo/pkg/api/v1/plugins/retries",
+    "projects/gloo/pkg/api/v1/plugins/static",
+    "projects/gloo/pkg/api/v1/plugins/transformation",
+  ]
+  pruneopts = "NUT"
+  revision = "f767e64f7ee60139ff79e2abb547cd149067da04"
+  version = "v0.13.17"
+
+[[projects]]
+  digest = "1:4a5b267a6929e4c3980066d745f87993c118f7797373a43537fdc69b3ee2d37e"
+  name = "github.com/solo-io/go-utils"
+  packages = [
+    "contextutils",
+    "errors",
+    "kubeutils",
+  ]
+  pruneopts = "NUT"
+  revision = "a27432d89f419897df796a17456410e49a9727c3"
+  version = "v0.7.11"
+
+[[projects]]
+  digest = "1:1cc9a8be450b7d9e77f100c7662133bf7bc8f0832f4eed3542599d3f35d28c46"
+  name = "github.com/solo-io/solo-kit"
+  packages = [
+    "pkg/api/v1/clients",
+    "pkg/api/v1/clients/configmap",
+    "pkg/api/v1/clients/consul",
+    "pkg/api/v1/clients/factory",
+    "pkg/api/v1/clients/file",
+    "pkg/api/v1/clients/kube",
+    "pkg/api/v1/clients/kube/cache",
+    "pkg/api/v1/clients/kube/controller",
+    "pkg/api/v1/clients/kube/crd",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned/scheme",
+    "pkg/api/v1/clients/kube/crd/client/clientset/versioned/typed/solo.io/v1",
+    "pkg/api/v1/clients/kube/crd/solo.io/v1",
+    "pkg/api/v1/clients/kubesecret",
+    "pkg/api/v1/clients/memory",
+    "pkg/api/v1/clients/vault",
+    "pkg/api/v1/eventloop",
+    "pkg/api/v1/reconcile",
+    "pkg/api/v1/resources",
+    "pkg/api/v1/resources/core",
+    "pkg/errors",
+    "pkg/utils/errutils",
+    "pkg/utils/fileutils",
+    "pkg/utils/hashutils",
+    "pkg/utils/kubeutils",
+    "pkg/utils/log",
+    "pkg/utils/protoutils",
+    "pkg/utils/stringutils",
+  ]
+  pruneopts = "NUT"
+  revision = "ab46647c2845a4830d09db3690b3ace1b06845cd"
+  version = "v0.6.3"
+
+[[projects]]
+  digest = "1:ba6f00e510774b2f1099d2f39a2ae36796ddbe406b02703b3395f26deb8d0f2c"
+  name = "github.com/solo-io/supergloo"
+  packages = [
+    "api/custom/kubepod",
+    "api/custom/linkerd",
+    "pkg/api/external/istio/authorization/v1alpha1",
+    "pkg/api/external/istio/networking/v1alpha3",
+    "pkg/api/v1",
+  ]
+  pruneopts = "NUT"
+  revision = "cb84ba5d7bd1099c5e52c09fc9229d1ee0fed9f9"
+  version = "v0.3.11"
+
 [[projects]]
   digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
@@ -263,6 +572,36 @@
   revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
   version = "v1.0.3"

+[[projects]]
+  branch = "master"
+  digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
+  name = "github.com/stefanprodan/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
+
+[[projects]]
+  digest = "1:1349e632a9915b7075f74c13474bfcae2594750c390d3c0b236e48bf6bce3fa2"
+  name = "go.opencensus.io"
+  packages = [
+    ".",
+    "internal",
+    "internal/tagencoding",
+    "metric/metricdata",
+    "metric/metricproducer",
+    "resource",
+    "stats",
+    "stats/internal",
+    "stats/view",
+    "tag",
+    "trace",
+    "trace/internal",
+    "trace/tracestate",
+  ]
+  pruneopts = "NUT"
+  revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38"
+  version = "v0.20.2"
+
 [[projects]]
   digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
   name = "go.uber.org/atomic"
@@ -296,15 +635,15 @@

 [[projects]]
   branch = "master"
-  digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9"
+  digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
   pruneopts = "NUT"
-  revision = "8dd112bcdc25174059e45e07517d9fc663123347"
+  revision = "b43e412143f90fca62516c457cae5a8dc1595586"

 [[projects]]
   branch = "master"
-  digest = "1:e3477b53a5c2fb71a7c9688e9b3d58be702807a5a88def8b9a327259d46e4979"
+  digest = "1:c86c292c268416012ab237c22c2c69fdd04cb891815b40343e75210472198455"
   name = "golang.org/x/net"
   packages = [
     "context",
@@ -315,11 +654,11 @@
     "idna",
   ]
   pruneopts = "NUT"
-  revision = "16b79f2e4e95ea23b2bf9903c9809ff7b013ce85"
+  revision = "1da14a5a36f220ea3f03470682b737b1dfd5de22"

 [[projects]]
   branch = "master"
-  digest = "1:17ee74a4d9b6078611784b873cdbfe91892d2c73052c430724e66fcc015b6c7b"
+  digest = "1:3121d742fbe48670a16d98b6da4693501fc33cd76d69ed6f35850c564f255c65"
   name = "golang.org/x/oauth2"
   packages = [
     ".",
@@ -329,18 +668,18 @@
     "jwt",
   ]
   pruneopts = "NUT"
-  revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3"
+  revision = "9f3314589c9a9136388751d9adae6b0ed400978a"

 [[projects]]
   branch = "master"
-  digest = "1:a0d91ab4d23badd4e64e115c6e6ba7dd56bd3cde5d287845822fb2599ac10236"
+  digest = "1:bd7da85408c51d6ab079e1acc5a2872fdfbea42e845b8bbb538c3fac6ef43d2a"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "30e92a19ae4a77dde818b8c3d41d51e4850cba12"
+  revision = "f0ce4c0180bef7e9c51babed693a6e47fdd8962f"

 [[projects]]
   digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -371,16 +710,15 @@
   name = "golang.org/x/time"
   packages = ["rate"]
   pruneopts = "NUT"
-  revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
+  revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef"

 [[projects]]
   branch = "master"
-  digest = "1:e46d8e20161401a9cf8765dfa428494a3492a0b56fe114156b7da792bf41ba78"
+  digest = "1:be1ab6d2b333b1d487c01f1328aef9dc76cee4ff4f780775a552d2a1653f0207"
   name = "golang.org/x/tools"
   packages = [
     "go/ast/astutil",
     "go/gcexportdata",
     "go/internal/cgo",
     "go/internal/gcimporter",
     "go/internal/packagesdriver",
     "go/packages",
@@ -392,10 +730,10 @@
     "internal/semver",
   ]
   pruneopts = "NUT"
-  revision = "f8c04913dfb7b2339a756441456bdbe0af6eb508"
+  revision = "6732636ccdfd99c4301d1d1ac2307f091331f767"

 [[projects]]
-  digest = "1:d395d49d784dd3a11938a3e85091b6570664aa90ff2767a626565c6c130fa7e9"
+  digest = "1:a4824d8df1fd1f63c6b3690bf4801d6ff1722adcb3e13c0489196a7e248d868a"
   name = "google.golang.org/appengine"
   packages = [
     ".",
@@ -410,8 +748,8 @@
     "urlfetch",
   ]
   pruneopts = "NUT"
-  revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
-  version = "v1.4.0"
+  revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610"
+  version = "v1.5.0"

 [[projects]]
   digest = "1:fe9eb931d7b59027c4a3467f7edc16cc8552dac5328039bec05045143c18e1ce"
@@ -438,7 +776,7 @@
   version = "v2.2.2"

 [[projects]]
-  digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
+  digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
   name = "k8s.io/api"
   packages = [
     "admissionregistration/v1alpha1",
@@ -446,16 +784,19 @@
     "apps/v1",
     "apps/v1beta1",
     "apps/v1beta2",
+    "auditregistration/v1alpha1",
     "authentication/v1",
     "authentication/v1beta1",
     "authorization/v1",
     "authorization/v1beta1",
     "autoscaling/v1",
     "autoscaling/v2beta1",
+    "autoscaling/v2beta2",
     "batch/v1",
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
@@ -472,11 +813,25 @@
     "storage/v1beta1",
   ]
   pruneopts = "NUT"
-  revision = "072894a440bdee3a891dea811fe42902311cd2a3"
-  version = "kubernetes-1.11.0"
+  revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
+  version = "kubernetes-1.13.1"

 [[projects]]
-  digest = "1:83b01e3d6f85c4e911de84febd69a2d3ece614c5a4a518fbc2b5d59000645980"
+  digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c"
+  name = "k8s.io/apiextensions-apiserver"
+  packages = [
+    "pkg/apis/apiextensions",
+    "pkg/apis/apiextensions/v1beta1",
+    "pkg/client/clientset/clientset",
+    "pkg/client/clientset/clientset/scheme",
+    "pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
+  ]
+  pruneopts = "NUT"
+  revision = "0fe22c71c47604641d9aa352c785b7912c200562"
+  version = "kubernetes-1.13.1"
+
+[[projects]]
+  digest = "1:5ac33dce66ac11d4f41c157be7f13ba30c968c74d25a3a3a0a1eddf44b6b2176"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/errors",
@@ -508,6 +863,7 @@
     "pkg/util/intstr",
     "pkg/util/json",
     "pkg/util/mergepatch",
+    "pkg/util/naming",
     "pkg/util/net",
     "pkg/util/runtime",
     "pkg/util/sets",
@@ -523,15 +879,61 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = "NUT"
-  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
-  version = "kubernetes-1.11.0"
+  revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
+  version = "kubernetes-1.13.1"

 [[projects]]
-  digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
+  digest = "1:ef9bda0e29102ac26750517500a2cb0bd7be69ba21ad267ab89a0b35d035328b"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
     "discovery/fake",
     "informers",
     "informers/admissionregistration",
     "informers/admissionregistration/v1alpha1",
     "informers/admissionregistration/v1beta1",
     "informers/apps",
     "informers/apps/v1",
     "informers/apps/v1beta1",
     "informers/apps/v1beta2",
+    "informers/auditregistration",
+    "informers/auditregistration/v1alpha1",
     "informers/autoscaling",
     "informers/autoscaling/v1",
     "informers/autoscaling/v2beta1",
+    "informers/autoscaling/v2beta2",
     "informers/batch",
     "informers/batch/v1",
     "informers/batch/v1beta1",
     "informers/batch/v2alpha1",
     "informers/certificates",
     "informers/certificates/v1beta1",
+    "informers/coordination",
+    "informers/coordination/v1beta1",
     "informers/core",
     "informers/core/v1",
     "informers/events",
     "informers/events/v1beta1",
     "informers/extensions",
     "informers/extensions/v1beta1",
     "informers/internalinterfaces",
     "informers/networking",
     "informers/networking/v1",
     "informers/policy",
     "informers/policy/v1beta1",
     "informers/rbac",
     "informers/rbac/v1",
     "informers/rbac/v1alpha1",
     "informers/rbac/v1beta1",
     "informers/scheduling",
     "informers/scheduling/v1alpha1",
     "informers/scheduling/v1beta1",
     "informers/settings",
     "informers/settings/v1alpha1",
     "informers/storage",
     "informers/storage/v1",
     "informers/storage/v1alpha1",
     "informers/storage/v1beta1",
     "kubernetes",
     "kubernetes/fake",
     "kubernetes/scheme",
@@ -545,6 +947,8 @@
     "kubernetes/typed/apps/v1beta1/fake",
     "kubernetes/typed/apps/v1beta2",
     "kubernetes/typed/apps/v1beta2/fake",
+    "kubernetes/typed/auditregistration/v1alpha1",
+    "kubernetes/typed/auditregistration/v1alpha1/fake",
     "kubernetes/typed/authentication/v1",
     "kubernetes/typed/authentication/v1/fake",
     "kubernetes/typed/authentication/v1beta1",
@@ -557,6 +961,8 @@
     "kubernetes/typed/autoscaling/v1/fake",
     "kubernetes/typed/autoscaling/v2beta1",
     "kubernetes/typed/autoscaling/v2beta1/fake",
+    "kubernetes/typed/autoscaling/v2beta2",
+    "kubernetes/typed/autoscaling/v2beta2/fake",
     "kubernetes/typed/batch/v1",
     "kubernetes/typed/batch/v1/fake",
     "kubernetes/typed/batch/v1beta1",
@@ -565,6 +971,8 @@
     "kubernetes/typed/batch/v2alpha1/fake",
     "kubernetes/typed/certificates/v1beta1",
     "kubernetes/typed/certificates/v1beta1/fake",
+    "kubernetes/typed/coordination/v1beta1",
+    "kubernetes/typed/coordination/v1beta1/fake",
     "kubernetes/typed/core/v1",
     "kubernetes/typed/core/v1/fake",
     "kubernetes/typed/events/v1beta1",
@@ -593,6 +1001,34 @@
     "kubernetes/typed/storage/v1alpha1/fake",
     "kubernetes/typed/storage/v1beta1",
     "kubernetes/typed/storage/v1beta1/fake",
+    "listers/admissionregistration/v1alpha1",
+    "listers/admissionregistration/v1beta1",
+    "listers/apps/v1",
+    "listers/apps/v1beta1",
+    "listers/apps/v1beta2",
+    "listers/auditregistration/v1alpha1",
+    "listers/autoscaling/v1",
+    "listers/autoscaling/v2beta1",
+    "listers/autoscaling/v2beta2",
+    "listers/batch/v1",
+    "listers/batch/v1beta1",
+    "listers/batch/v2alpha1",
+    "listers/certificates/v1beta1",
+    "listers/coordination/v1beta1",
+    "listers/core/v1",
+    "listers/events/v1beta1",
+    "listers/extensions/v1beta1",
+    "listers/networking/v1",
+    "listers/policy/v1beta1",
+    "listers/rbac/v1",
+    "listers/rbac/v1alpha1",
+    "listers/rbac/v1beta1",
+    "listers/scheduling/v1alpha1",
+    "listers/scheduling/v1beta1",
+    "listers/settings/v1alpha1",
+    "listers/storage/v1",
+    "listers/storage/v1alpha1",
+    "listers/storage/v1beta1",
     "pkg/apis/clientauthentication",
     "pkg/apis/clientauthentication/v1alpha1",
     "pkg/apis/clientauthentication/v1beta1",
@@ -625,11 +1061,11 @@
     "util/workqueue",
   ]
   pruneopts = "NUT"
-  revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
-  version = "kubernetes-1.11.0"
+  revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
+  version = "kubernetes-1.13.1"

 [[projects]]
-  digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
+  digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/client-gen",
@@ -653,12 +1089,12 @@
     "pkg/util",
   ]
   pruneopts = "T"
-  revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
-  version = "kubernetes-1.11.0"
+  revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
+  version = "kubernetes-1.13.1"

 [[projects]]
   branch = "master"
-  digest = "1:61024ed77a53ac618effed55043bf6a9afbdeb64136bd6a5b0c992d4c0363766"
+  digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15"
   name = "k8s.io/gengo"
   packages = [
     "args",
@@ -671,23 +1107,32 @@
     "types",
   ]
   pruneopts = "NUT"
-  revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
+  revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985"

 [[projects]]
-  digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a"
-  name = "k8s.io/klog"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
-  version = "v0.2.0"
+  branch = "master"
+  digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
+  name = "k8s.io/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
+  source = "github.com/stefanprodan/klog"

 [[projects]]
   branch = "master"
-  digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
+  digest = "1:42674e29bf0cf4662d49bd9528e24b9ecc4895b32d0be281f9cf04d3a7671846"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/util/proto"]
   pruneopts = "NUT"
-  revision = "b3a7cee44a305be0a69e1b9ac03018307287e1b0"
+  revision = "6b3d3b2d5666c5912bab8b7bf26bf50f75a8f887"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
   name = "sigs.k8s.io/yaml"
   packages = ["."]
   pruneopts = "NUT"
   revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
   version = "v1.1.0"

 [solve-meta]
   analyzer-name = "dep"
@@ -695,9 +1140,17 @@
   input-imports = [
     "github.com/google/go-cmp/cmp",
     "github.com/google/go-cmp/cmp/cmpopts",
     "github.com/istio/glog",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
+    "github.com/solo-io/gloo/projects/gloo/pkg/api/v1",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/factory",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/kube",
+    "github.com/solo-io/solo-kit/pkg/api/v1/clients/memory",
+    "github.com/solo-io/solo-kit/pkg/api/v1/resources/core",
+    "github.com/solo-io/solo-kit/pkg/errors",
+    "github.com/solo-io/supergloo/pkg/api/v1",
+    "github.com/stefanprodan/klog",
     "go.uber.org/zap",
     "go.uber.org/zap/zapcore",
     "gopkg.in/h2non/gock.v1",
```
Gopkg.toml (30 changed lines)

```diff
@@ -21,25 +21,27 @@ required = [

 [[override]]
   name = "k8s.io/api"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"

 [[override]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"

 [[override]]
   name = "k8s.io/code-generator"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"

 [[override]]
   name = "k8s.io/client-go"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.13.1"

 [[override]]
-  name = "github.com/json-iterator/go"
-  # This is the commit at which k8s depends on this in 1.11
-  # It seems to be broken at HEAD.
-  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
+  name = "k8s.io/apiextensions-apiserver"
+  version = "kubernetes-1.13.1"
+
+[[override]]
+  name = "k8s.io/apiserver"
+  version = "kubernetes-1.13.1"

 [[constraint]]
   name = "github.com/prometheus/client_golang"
@@ -50,8 +52,8 @@ required = [
   version = "v0.2.0"

 [[override]]
-  name = "github.com/golang/glog"
-  source = "github.com/istio/glog"
+  name = "k8s.io/klog"
+  source = "github.com/stefanprodan/klog"

 [prune]
   go-tests = true
@@ -62,3 +64,11 @@ required = [
   name = "k8s.io/code-generator"
   unused-packages = false
   non-go = false
+
+[[constraint]]
+  name = "github.com/solo-io/supergloo"
+  version = "v0.3.11"
+
+[[constraint]]
+  name = "github.com/solo-io/solo-kit"
+  version = "v0.6.3"
```
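These constraints feed the dep solver that produced the Gopkg.lock above (`analyzer-name = "dep"`). A sketch of regenerating the lock after editing Gopkg.toml, using standard dep commands rather than anything repo-specific:

```sh
dep ensure -v      # solve the constraints and vendor the dependencies
dep status | head  # inspect the pinned revisions that end up in Gopkg.lock
```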
Makefile (15 changed lines)

```diff
@@ -4,6 +4,7 @@ VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4
 PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
 SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
+LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
 TS=$(shell date +%Y-%m-%d_%H-%M-%S)

 run:
	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
@@ -17,12 +18,18 @@ run-appmesh:
		-slack-url=https://hooks.slack.com/services/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 \
		-slack-channel="devops-alerts"

+run-nginx:
+	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
+		-metrics-server=http://prometheus-weave.istio.weavedx.com \
+		-slack-url=https://hooks.slack.com/services/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 \
+		-slack-channel="devops-alerts"
+
 build:
	docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile

 push:
-	docker tag weaveworks/flagger:$(TAG) quay.io/weaveworks/flagger:$(VERSION)
-	docker push quay.io/weaveworks/flagger:$(VERSION)
+	docker tag weaveworks/flagger:$(TAG) weaveworks/flagger:$(VERSION)
+	docker push weaveworks/flagger:$(VERSION)

 fmt:
	gofmt -l -s -w $(SOURCE_DIRS)
@@ -88,5 +95,5 @@ reset-test:
	kubectl apply -f ./artifacts/canaries

 loadtester-push:
-	docker build -t quay.io/weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
-	docker push quay.io/weaveworks/flagger-loadtester:$(LT_VERSION)
+	docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
+	docker push weaveworks/flagger-loadtester:$(LT_VERSION)
```
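The new `LT_VERSION` variable greps the version constant out of the load tester's main file. Assuming the source contains a declaration of the form `var VERSION = "0.1.0"` (the format is an assumption, not shown in this diff), the pipeline picks the fourth whitespace-separated field and strips the quotes:

```sh
echo 'var VERSION = "0.1.0"' | awk '{ print $4 }' | tr -d '"' | head -n1   # -> 0.1.0
```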
README.md (85 changed lines)

```diff
@@ -7,49 +7,51 @@
 [release](https://github.com/weaveworks/flagger/releases)

 Flagger is a Kubernetes operator that automates the promotion of canary deployments
-using Istio or App Mesh routing for traffic shifting and Prometheus metrics for canary analysis.
+using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
 The canary analysis can be extended with webhooks for running acceptance tests,
 load tests or any other custom validation.

 Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
 indicators like HTTP requests success rate, requests average duration and pods health.
 Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.

 [Flagger canary overview diagram]

-### Documentation
+## Documentation

 Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)

 * Install
   * [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
   * [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
   * [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
+  * [Flagger install with SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
 * How it works
   * [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
   * [Routing](https://docs.flagger.app/how-it-works#istio-routing)
   * [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
   * [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
   * [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
   * [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
   * [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
   * [Load testing](https://docs.flagger.app/how-it-works#load-testing)
 * Usage
   * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
   * [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
   * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
+  * [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
   * [Monitoring](https://docs.flagger.app/usage/monitoring)
   * [Alerting](https://docs.flagger.app/usage/alerting)
 * Tutorials
   * [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)

-### Canary CRD
+## Canary CRD

 Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
 then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services).
 These objects expose the application on the mesh and drive the canary analysis and promotion.

 Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
 When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.

 For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -80,6 +82,7 @@ spec:
     # Istio gateways (optional)
     gateways:
     - public-gateway.istio-system.svc.cluster.local
+    - mesh
     # Istio virtual service host names (optional)
     hosts:
     - podinfo.example.com
@@ -117,13 +120,13 @@ spec:
       stepWeight: 5
     # Istio Prometheus checks
     metrics:
-    # builtin Istio checks
-    - name: istio_requests_total
+    # builtin checks
+    - name: request-success-rate
       # minimum req success rate (non 5xx responses)
       # percentage (0-100)
       threshold: 99
       interval: 1m
-    - name: istio_request_duration_seconds_bucket
+    - name: request-duration
       # maximum req duration P99
       # milliseconds
       threshold: 500
@@ -149,30 +152,44 @@ spec:

 For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/how-it-works).

-### Roadmap
+## Features

-* Integrate with other service mesh technologies like Linkerd v2, Super Gloo or Consul Mesh
+| Feature                                       | Istio              | App Mesh           | SuperGloo          | NGINX Ingress      |
+| --------------------------------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+| Canary deployments (weighted traffic)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| A/B testing (headers and cookies filters)     | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
+| Load testing                                  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Webhooks (custom acceptance tests)            | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request success rate check (L7 metric)        | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request duration check (L7 metric)            | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
+| Custom promql checks                          | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Ingress gateway (CORS, retries and timeouts)  | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
+
+## Roadmap
+
+* Integrate with other service mesh technologies like Linkerd v2
 * Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two

-### Contributing
+## Contributing

 Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.

 When submitting bug reports please include as much details as possible:

 * which Flagger version
 * which Flagger CRD version
 * which Kubernetes/Istio version
 * what configuration (canary, virtual service and workloads definitions)
 * what happened (Flagger, Istio Pilot and Proxy logs)

-### Getting Help
+## Getting Help

 If you have any questions about Flagger and progressive delivery:

 * Read the Flagger [docs](https://docs.flagger.app).
 * Invite yourself to the [Weave community slack](https://slack.weave.works/)
   and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
 * Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
   hands-on training and meetups in your area.
 * File an [issue](https://github.com/weaveworks/flagger/issues/new).
```
@@ -23,6 +23,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- abtest.istio.weavedx.com
@@ -42,12 +43,12 @@ spec:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500

@@ -36,7 +36,7 @@ spec:
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: envoy_cluster_upstream_rq
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99

@@ -23,6 +23,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
@@ -55,12 +56,12 @@ spec:
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500

@@ -23,6 +23,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
@@ -39,12 +40,12 @@ spec:
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500

@@ -31,6 +31,12 @@ rules:
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs: ["*"]
- apiGroups:
- flagger.app
resources:

@@ -2,6 +2,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
annotations:
helm.sh/resource-policy: keep
spec:
group: flagger.app
version: v1alpha3
@@ -39,9 +41,9 @@ spec:
properties:
spec:
required:
- targetRef
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
@@ -67,6 +69,18 @@ spec:
type: string
name:
type: string
ingressRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
@@ -119,9 +133,36 @@ spec:
properties:
name:
type: string
type:
type: string
enum:
- ""
- pre-rollout
- rollout
- post-rollout
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
type: string
enum:
- ""
- Initialized
- Progressing
- Succeeded
- Failed
canaryWeight:
type: number
failedChecks:
type: number
iterations:
type: number
lastAppliedSpec:
type: string
lastTransitionTime:
type: string

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/weaveworks/flagger:0.10.0
image: weaveworks/flagger:0.13.2
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -31,6 +31,7 @@ spec:
- ./flagger
- -log-level=info
- -control-loop-interval=10s
- -mesh-provider=$(MESH_PROVIDER)
- -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
livenessProbe:
exec:

19
artifacts/loadtester/config.yaml
Normal file
@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: flagger-loadtester-bats
data:
tests: |
#!/usr/bin/env bats

@test "check message" {
curl -sS http://${URL} | jq -r .message | {
run cut -d $' ' -f1
[ $output = "greetings" ]
}
}

@test "check headers" {
curl -sS http://${URL}/headers | grep X-Request-Id
}
@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: quay.io/stefanprodan/flagger-loadtester:0.2.0
image: weaveworks/flagger-loadtester:0.3.0
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -57,3 +57,11 @@ spec:
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001
# volumeMounts:
# - name: tests
# mountPath: /bats
# readOnly: true
# volumes:
# - name: tests
# configMap:
# name: flagger-loadtester-bats
68
artifacts/nginx/canary.yaml
Normal file
@@ -0,0 +1,68 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# ingress reference
ingressRef:
apiVersion: extensions/v1beta1
kind: Ingress
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# NGINX Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: "latency"
threshold: 0.5
interval: 1m
query: |
histogram_quantile(0.99,
sum(
rate(
http_request_duration_seconds_bucket{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) by (le)
)
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
logCmdOutput: "true"
69
artifacts/nginx/deployment.yaml
Normal file
@@ -0,0 +1,69 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: green
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
resources:
limits:
cpu: 1000m
memory: 256Mi
requests:
cpu: 100m
memory: 16Mi
19
artifacts/nginx/hpa.yaml
Normal file
@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

17
artifacts/nginx/ingress.yaml
Normal file
@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: app.example.com
http:
paths:
- backend:
serviceName: podinfo
servicePort: 9898
@@ -1,10 +1,10 @@
apiVersion: v1
name: flagger
version: 0.10.0
appVersion: 0.10.0
version: 0.13.2
appVersion: 0.13.2
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:

@@ -45,7 +45,7 @@ The following tables lists the configurable parameters of the Flagger chart and

Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.repository` | image repository | `weaveworks/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`

@@ -70,6 +70,18 @@ spec:
type: string
name:
type: string
ingressRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
@@ -122,10 +134,37 @@ spec:
properties:
name:
type: string
type:
type: string
enum:
- ""
- pre-rollout
- rollout
- post-rollout
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
type: string
enum:
- ""
- Initialized
- Progressing
- Succeeded
- Failed
canaryWeight:
type: number
failedChecks:
type: number
iterations:
type: number
lastAppliedSpec:
type: string
lastTransitionTime:
type: string
{{- end }}

@@ -38,7 +38,11 @@ spec:
{{- if .Values.meshProvider }}
- -mesh-provider={{ .Values.meshProvider }}
{{- end }}
{{- if .Values.prometheus.install }}
- -metrics-server=http://{{ template "flagger.fullname" . }}-prometheus:9090
{{- else }}
- -metrics-server={{ .Values.metricsServer }}
{{- end }}
{{- if .Values.namespace }}
- -namespace={{ .Values.namespace }}
{{- end }}

292
charts/flagger/templates/prometheus.yaml
Normal file
@@ -0,0 +1,292 @@
{{- if .Values.prometheus.install }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources:
- nodes
- services
- endpoints
- pods
- nodes/proxy
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "flagger.fullname" . }}-prometheus
subjects:
- kind: ServiceAccount
name: {{ template "flagger.serviceAccountName" . }}-prometheus
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "flagger.serviceAccountName" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
prometheus.yml: |-
global:
scrape_interval: 5s
scrape_configs:

# Scrape config for AppMesh Envoy sidecar
- job_name: 'appmesh-envoy'
metrics_path: /stats/prometheus
kubernetes_sd_configs:
- role: pod

relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_name]
action: keep
regex: '^envoy$'
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: ${1}:9901
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name

# Exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [ cluster_name ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ tcp_prefix ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ listener_address ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_listener_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tls.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tcp_downstream.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_http_(stats|admin).*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
action: drop

# Scrape config for API servers
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- default
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https

# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics

# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

# scrape config for pods
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- source_labels: [ __address__ ]
regex: '.*9901.*'
action: drop
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
annotations:
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
sidecar.istio.io/inject: "false"
spec:
serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
containers:
- name: prometheus
image: "docker.io/prom/prometheus:v2.7.1"
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=6h'
- '--config.file=/etc/prometheus/prometheus.yml'
ports:
- containerPort: 9090
name: http
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
readinessProbe:
httpGet:
path: /-/ready
port: 9090
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: config-volume
mountPath: /etc/prometheus
- name: data-volume
mountPath: /prometheus/data

volumes:
- name: config-volume
configMap:
name: {{ template "flagger.fullname" . }}-prometheus
- name: data-volume
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
selector:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
ports:
- name: http
protocol: TCP
port: 9090
{{- end }}
@@ -27,6 +27,12 @@ rules:
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs: ["*"]
- apiGroups:
- flagger.app
resources:

@@ -1,13 +1,13 @@
# Default values for flagger.

image:
repository: quay.io/weaveworks/flagger
tag: 0.10.0
repository: weaveworks/flagger
tag: 0.13.2
pullPolicy: IfNotPresent

metricsServer: "http://prometheus:9090"

# accepted values are istio or appmesh (defaults to istio)
# accepted values are istio, appmesh, nginx or supergloo:mesh.namespace (defaults to istio)
meshProvider: ""

# single namespace restriction
@@ -49,3 +49,7 @@ nodeSelector: {}
tolerations: []

affinity: {}

prometheus:
# to be used with AppMesh or nginx ingress
install: false

@@ -1,6 +1,6 @@
apiVersion: v1
name: grafana
version: 1.1.0
version: 1.2.0
appVersion: 5.4.3
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png

@@ -1614,9 +1614,9 @@
"multi": false,
"name": "primary",
"options": [],
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"regex": "/.*destination_workload=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
@@ -1636,9 +1636,9 @@
"multi": false,
"name": "canary",
"options": [],
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"regex": "/.*destination_workload=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",

@@ -1,7 +1,7 @@
apiVersion: v1
name: loadtester
version: 0.2.0
appVersion: 0.2.0
version: 0.4.0
appVersion: 0.3.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service, based on rakyll/hey, that generates traffic during canary analysis when configured as a webhook.

@@ -1,8 +1,8 @@
replicaCount: 1

image:
repository: quay.io/weaveworks/flagger-loadtester
tag: 0.2.0
repository: weaveworks/flagger-loadtester
tag: 0.3.0
pullPolicy: IfNotPresent

logLevel: info

@@ -1,5 +1,5 @@
apiVersion: v1
version: 2.0.0
version: 2.0.1
appVersion: 1.4.0
name: podinfo
engine: gotpl

@@ -32,10 +32,10 @@ spec:
maxWeight: {{ .Values.canary.analysis.maxWeight }}
stepWeight: {{ .Values.canary.analysis.stepWeight }}
metrics:
- name: istio_requests_total
- name: request-success-rate
threshold: {{ .Values.canary.thresholds.successRate }}
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
threshold: {{ .Values.canary.thresholds.latency }}
interval: 1m
{{- if .Values.canary.loadtest.enabled }}

@@ -2,12 +2,18 @@ package main

import (
"flag"
_ "github.com/istio/glog"
"log"
"strings"
"time"

_ "github.com/stefanprodan/klog"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logging"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
@@ -16,8 +22,6 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"log"
"time"
)

var (
@@ -35,6 +39,7 @@ var (
zapEncoding string
namespace string
meshProvider string
selectorLabels string
)

func init() {
@@ -52,12 +57,13 @@ func init() {
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio or appmesh")
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors")
}

func main() {
flag.Parse()

logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
@@ -93,19 +99,24 @@ func main() {

canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
logger.Infof("Starting flagger version %s revision %s mesh provider %s", version.VERSION, version.REVISION, meshProvider)

ver, err := kubeClient.Discovery().ServerVersion()
if err != nil {
logger.Fatalf("Error calling Kubernetes API: %v", err)
}

labels := strings.Split(selectorLabels, ",")
if len(labels) < 1 {
logger.Fatalf("At least one selector label is required")
}

logger.Infof("Connected to Kubernetes API %s", ver)
if namespace != "" {
logger.Infof("Watching namespace %s", namespace)
}

ok, err := controller.CheckMetricsServer(metricsServer)
ok, err := metrics.CheckMetricsServer(metricsServer)
if ok {
logger.Infof("Connected to metrics server %s", metricsServer)
} else {
@@ -125,6 +136,8 @@ func main() {
// start HTTP server
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)

c := controller.NewController(
kubeClient,
meshClient,
@@ -134,7 +147,10 @@ func main() {
metricsServer,
logger,
slack,
routerFactory,
meshProvider,
version.VERSION,
labels,
)

flaggerInformerFactory.Start(stopCh)

@@ -3,14 +3,14 @@ package main
import (
"flag"
"github.com/weaveworks/flagger/pkg/loadtester"
"github.com/weaveworks/flagger/pkg/logging"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/signals"
"go.uber.org/zap"
"log"
"time"
)

var VERSION = "0.2.0"
var VERSION = "0.3.0"
var (
logLevel string
port string
@@ -30,7 +30,7 @@ func init() {
func main() {
flag.Parse()

logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}

BIN docs/diagrams/flagger-gitops-istio.png (new binary file, 35 KiB, not shown)
BIN docs/diagrams/flagger-nginx-overview.png (new binary file, 40 KiB, not shown)
@@ -5,7 +5,7 @@ description: Flagger is a progressive delivery Kubernetes operator
# Introduction

[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** or **App Mesh** routing for traffic shifting and **Prometheus** metrics for canary analysis.
deployments using **Istio**, **App Mesh** or **NGINX** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.

@@ -8,12 +8,14 @@
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
* [Flagger Install with SuperGloo](install/flagger-install-with-supergloo.md)

## Usage

* [Istio Canary Deployments](usage/progressive-delivery.md)
* [Istio A/B Testing](usage/ab-testing.md)
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

21
docs/gitbook/faq.md
Normal file
@@ -0,0 +1,21 @@
# Frequently asked questions

**Can Flagger be part of my integration tests?**
> Yes, Flagger supports webhooks to do integration testing.

**What if I only want to target beta testers?**
> That's a feature in Flagger, not in App Mesh. It's on the App Mesh roadmap.

**When do I use A/B testing instead of a Canary deployment?**
> One advantage of using A/B testing is that each version remains separated and routes aren't mixed.
>
> Using a Canary deployment can lead to behaviour like this one observed by a
> user:
>
> [..] during a canary deployment of our nodejs app, the version that is being served <50% traffic reports mime type mismatch errors in the browser (js as "text/html")
> When the deployment Passes/ Fails (doesn't really matter) the version that stays alive works as expected. If anyone has any tips or direction I would greatly appreciate it. Even if its as simple as I'm looking in the wrong place. Thanks in advance!
>
> The issue was that we were not maintaining session affinity while serving files for our frontend. Which resulted in any redirects or refreshes occasionally returning a mismatched app.*.js file (generated from vue)
>
> Read up on [A/B testing](https://docs.flagger.app/usage/ab-testing).

@@ -2,7 +2,7 @@

[Flagger](https://github.com/weaveworks/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
\(Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services\) to drive the canary analysis and promotion.

![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)

@@ -38,6 +38,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
@@ -55,14 +56,14 @@ spec:
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
# Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
@@ -95,6 +96,9 @@ spec:
app: podinfo
```

Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag, as sketched below.
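
A minimal sketch of how such a selector lookup could work: pick the first configured label key that is present on the pod template. This is an illustration only, not Flagger's actual implementation; the function name is hypothetical.

```go
package main

import "fmt"

// firstMatchingSelector returns the first configured label key that is
// present on the workload's pod template labels.
func firstMatchingSelector(selectorLabels []string, podLabels map[string]string) (string, error) {
	for _, key := range selectorLabels {
		if _, ok := podLabels[key]; ok {
			return key, nil
		}
	}
	return "", fmt.Errorf("no selector label found, tried %v", selectorLabels)
}

func main() {
	podLabels := map[string]string{"app.kubernetes.io/name": "podinfo"}
	key, err := firstMatchingSelector([]string{"app", "name", "app.kubernetes.io/name"}, podLabels)
	if err != nil {
		panic(err)
	}
	fmt.Println("using selector:", key) // using selector: app.kubernetes.io/name
}
```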
|
||||
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
|
||||
|
||||
@@ -122,6 +126,7 @@ spec:
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- frontend.example.com
|
||||
@@ -266,16 +271,22 @@ Gated canary promotion stages:
|
||||
* check primary and canary deployments status
|
||||
* halt advancement if a rolling update is underway
|
||||
* halt advancement if pods are unhealthy
|
||||
* call pre-rollout webhooks are check results
|
||||
* halt advancement if any hook returned a non HTTP 2xx result
|
||||
* increment the failed checks counter
|
||||
* increase canary traffic weight percentage from 0% to 5% (step weight)
|
||||
* call webhooks and check results
|
||||
* call rollout webhooks and check results
|
||||
* check canary HTTP request success rate and latency
|
||||
* halt advancement if any metric is under the specified threshold
|
||||
* increment the failed checks counter
|
||||
* check if the number of failed checks reached the threshold
|
||||
* route all traffic to primary
|
||||
* scale to zero the canary deployment and mark it as failed
|
||||
* call post-rollout webhooks
|
||||
* post the analysis result to Slack
|
||||
* wait for the canary deployment to be updated and start over
|
||||
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
|
||||
* halt advancement if any webhook call fails
|
||||
* halt advancement while canary request success rate is under the threshold
|
||||
* halt advancement while canary request duration P99 is over the threshold
|
||||
* halt advancement if the primary or canary deployment becomes unhealthy
|
||||
@@ -288,6 +299,8 @@ Gated canary promotion stages:
|
||||
* route all traffic to primary
|
||||
* scale to zero the canary deployment
|
||||
* mark rollout as finished
|
||||
* call post-rollout webhooks
|
||||
* post the analysis result to Slack
|
||||
* wait for the canary deployment to be updated and start over
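
The gated loop above can be summarised in a few lines of Go. This is an illustrative sketch, assuming stubbed-out routing, webhook and metric checks; the function names are hypothetical, not Flagger's actual API.

```go
package main

import "fmt"

// Stubs standing in for the real traffic routing, webhook and metric checks.
func setCanaryWeight(w int) { fmt.Println("canary weight:", w) }
func webhooksPass() bool    { return true }
func metricsPass() bool     { return true }

// runAnalysis walks the canary weight from stepWeight up to maxWeight,
// halting advancement on failed checks and rolling back once the
// failed-checks threshold is reached.
func runAnalysis(stepWeight, maxWeight, threshold int) bool {
	failedChecks := 0
	weight := 0
	for weight < maxWeight {
		if !webhooksPass() || !metricsPass() {
			failedChecks++
			if failedChecks >= threshold {
				setCanaryWeight(0) // route all traffic back to primary
				return false       // rolled back
			}
			continue // hold the current weight and re-check next interval
		}
		weight += stepWeight
		setCanaryWeight(weight)
	}
	setCanaryWeight(0) // promotion done: primary now runs the new version
	return true
}

func main() {
	fmt.Println("promoted:", runAnalysis(5, 50, 10))
}
```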
### Canary Analysis
@@ -385,14 +398,14 @@ Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
```

Query:
Istio query:

```javascript
sum(
@@ -417,6 +430,29 @@ sum(
)
```

App Mesh query:

```javascript
sum(
rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="$namespace",
kubernetes_pod_name=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="$namespace",
kubernetes_pod_name=~"$workload"
}[$interval]
)
)
```

**HTTP requests milliseconds duration P99**

Spec:
@@ -424,14 +460,14 @@ Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
```

Query:
Istio query:

```javascript
histogram_quantile(0.99,
@@ -447,6 +483,21 @@ histogram_quantile(0.99,
)
```

App Mesh query:

```javascript
histogram_quantile(0.99,
sum(
irate(
envoy_cluster_upstream_rq_time_bucket{
kubernetes_pod_name=~"$workload",
kubernetes_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```

> **Note** that the metric interval should be lower than or equal to the control loop interval.

### Custom Metrics
@@ -522,39 +573,55 @@ rate reaches the 5% threshold, then the canary fails.
When specifying a query, Flagger will run the promql query and convert the result to float64.
Then it compares the query result value with the metric threshold value.
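
A minimal sketch of that query-and-compare step, using the public Prometheus HTTP API. The Prometheus URL and the threshold comparison direction are assumptions for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// checkMetric runs a PromQL query against the Prometheus HTTP API and
// compares the scalar result with a threshold, roughly as described above.
func checkMetric(promURL, query string, threshold float64) (bool, error) {
	resp, err := http.Get(promURL + "/api/v1/query?query=" + url.QueryEscape(query))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// minimal projection of the Prometheus response envelope
	var result struct {
		Data struct {
			Result []struct {
				Value [2]interface{} `json:"value"` // [timestamp, "value"]
			} `json:"result"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return false, err
	}
	if len(result.Data.Result) < 1 {
		return false, fmt.Errorf("no values found for query %s", query)
	}
	value, err := strconv.ParseFloat(result.Data.Result[0].Value[1].(string), 64)
	if err != nil {
		return false, err
	}
	return value >= threshold, nil // e.g. success rate must stay above 99
}

func main() {
	ok, err := checkMetric("http://prometheus:9090", "vector(99.9)", 99)
	fmt.Println(ok, err)
}
```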
### Webhooks

The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.
The canary analysis can be extended with webhooks. Flagger will call each webhook URL and
determine from the response status code (HTTP 2xx) if the canary is failing or not.

There are three types of hooks:
* Pre-rollout hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reaches the
threshold the canary will be rolled back.
* Rollout hooks are executed during the analysis on each iteration before the metric checks.
If a rollout hook call fails the canary advancement is paused and eventually rolled back.
* Post-rollout hooks are executed after the canary has been promoted or rolled back.
If a post rollout hook fails the error is logged.

Spec:

```yaml
canaryAnalysis:
webhooks:
- name: integration-test
url: http://int-runner.test:8080/
timeout: 30s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
- name: db-test
- name: "smoke test"
type: pre-rollout
url: http://migration-check.db/query
timeout: 30s
metadata:
key1: "val1"
key2: "val2"
- name: "load test"
type: rollout
url: http://flagger-loadtester.test/
timeout: 15s
metadata:
cmd: "hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/"
- name: "notify"
type: post-rollout
url: http://telegram.bot:8080/
timeout: 5s
metadata:
some: "message"
```

> **Note** that the sum of all webhooks timeouts should be lower than the control loop interval.
> **Note** that the sum of all rollout webhooks timeouts should be lower than the analysis interval.

Webhook payload (HTTP POST):

```json
{
"name": "podinfo",
"namespace": "test",
"phase": "Progressing",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
@@ -622,7 +689,7 @@ webhooks:

When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady steam of GET and POST requests.
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.

If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
public URL and use HTTP2.
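
The "run in the background, if not already running" behaviour can be sketched as a small command deduplicator. This is an illustration under assumptions, not the load tester's actual code.

```go
package main

import (
	"fmt"
	"os/exec"
	"sync"
	"time"
)

// taskRunner deduplicates load test commands so each `hey` invocation runs
// at most once at a time.
type taskRunner struct {
	mu      sync.Mutex
	running map[string]bool
}

func (t *taskRunner) run(cmd string) {
	t.mu.Lock()
	if t.running[cmd] {
		t.mu.Unlock()
		return // already generating traffic for this command
	}
	t.running[cmd] = true
	t.mu.Unlock()

	go func() {
		defer func() {
			t.mu.Lock()
			delete(t.running, cmd)
			t.mu.Unlock()
		}()
		out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
		fmt.Printf("cmd finished (err=%v): %s\n", err, out)
	}()
}

func main() {
	r := &taskRunner{running: map[string]bool{}}
	r.run("hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/")
	// keep the process alive long enough for the load test to finish
	time.Sleep(90 * time.Second)
}
```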
@@ -674,4 +741,4 @@ webhooks:
```

When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
to the nGrinder server and start a new performance test. The load tester will periodically poll the nGrinder server
for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops.
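
That polling pattern, reduced to its essentials: keep asking for the test status until a terminal state or a deadline. The status names and the getStatus stub are assumptions, not the nGrinder API.

```go
package main

import (
	"fmt"
	"time"
)

// pollStatus asks the server for the test status until it reports a
// terminal state or the deadline passes.
func pollStatus(getStatus func() string, interval time.Duration, deadline time.Time) string {
	for time.Now().Before(deadline) {
		status := getStatus()
		if status == "FINISHED" || status == "STOPPED" {
			return status
		}
		time.Sleep(interval)
	}
	return "TIMEOUT"
}

func main() {
	calls := 0
	fake := func() string { // pretend the test finishes on the third poll
		calls++
		if calls >= 3 {
			return "FINISHED"
		}
		return "TESTING"
	}
	fmt.Println(pollStatus(fake, 10*time.Millisecond, time.Now().Add(time.Second)))
}
```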
@@ -125,19 +125,6 @@ Status:
Type: MeshActive
```

### Install Prometheus

In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.

Deploy Prometheus in the `appmesh-system` namespace:

```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/eks/appmesh-prometheus.yaml
```

### Install Flagger and Grafana

Add Flagger Helm repository:
@@ -146,16 +133,17 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**appmesh-system**_ namespace:
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh-system:9090
--set prometheus.install=true
```

You can install Flagger in any namespace as long as it can talk to the Istio Prometheus service on port 9090.
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.

You can enable **Slack** notifications with:

@@ -186,7 +186,7 @@ Install cert-manager's CRDs:

```bash
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager

kubectl apply -f ${CERT_REPO}/release-0.6/deploy/manifests/00-crds.yaml
kubectl apply -f ${CERT_REPO}/release-0.7/deploy/manifests/00-crds.yaml
```

Create the cert-manager namespace and disable resource validation:
@@ -200,10 +200,12 @@ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
Install cert-manager with Helm:

```bash
helm repo update && helm upgrade -i cert-manager \
helm repo add jetstack https://charts.jetstack.io && \
helm repo update && \
helm upgrade -i cert-manager \
--namespace cert-manager \
--version v0.6.0 \
stable/cert-manager
--version v0.7.0 \
jetstack/cert-manager
```

### Istio Gateway TLS setup

@@ -52,7 +52,8 @@ If you don't have Tiller you can use the helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/flagger \
helm fetch --untar --untardir . flagger/flagger &&
helm template flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
@@ -98,12 +99,10 @@ Or use helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/grafana \
helm fetch --untar --untardir . flagger/grafana &&
helm template grafana \
--name flagger-grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090 \
--set user=admin \
--set password=change-me \
> $HOME/flagger-grafana.yaml

# apply
@@ -132,10 +131,14 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
Deploy with kubectl:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
--name flagger-loadtester \
--namespace=test
> $HOME/flagger-loadtester.yaml

kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```

> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.

184
docs/gitbook/install/flagger-install-with-supergloo.md
Normal file
@@ -0,0 +1,184 @@
# Flagger install on Kubernetes with SuperGloo

This guide walks you through setting up Flagger on a Kubernetes cluster using [SuperGloo](https://github.com/solo-io/supergloo).

SuperGloo by [Solo.io](https://solo.io) is an opinionated abstraction layer that simplifies the installation, management, and operation of your service mesh.
It supports running multiple ingresses with multiple meshes (Istio, App Mesh, Consul Connect and Linkerd 2) in the same cluster.

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:

* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook

### Install Istio with SuperGloo

#### Install SuperGloo command line interface helper

SuperGloo includes a command line helper (CLI) that makes operation of SuperGloo easier.
The CLI is not required for SuperGloo to function correctly.

If you use the [Homebrew](https://brew.sh) package manager run the following
commands to install the SuperGloo CLI.

```bash
brew tap solo-io/tap
brew install solo-io/tap/supergloo
```

Or you can download the SuperGloo CLI and add it to your path:

```bash
curl -sL https://run.solo.io/supergloo/install | sh
export PATH=$HOME/.supergloo/bin:$PATH
```

#### Install SuperGloo controller

Deploy the SuperGloo controller in the `supergloo-system` namespace:

```bash
supergloo init
```

This is equivalent to installing SuperGloo using its Helm chart:

```bash
helm repo add supergloo http://storage.googleapis.com/supergloo-helm
helm upgrade --install supergloo supergloo/supergloo --namespace supergloo-system
```

#### Install Istio using SuperGloo

Create the `istio-system` namespace and install Istio with traffic management, telemetry and Prometheus enabled:

```bash
ISTIO_VER="1.0.6"

kubectl create namespace istio-system

supergloo install istio --name istio \
--namespace=supergloo-system \
--auto-inject=true \
--installation-namespace=istio-system \
--mtls=false \
--prometheus=true \
--version=${ISTIO_VER}
```

This creates a Kubernetes custom resource like the following:

```yaml
apiVersion: supergloo.solo.io/v1
kind: Install
metadata:
name: istio
namespace: supergloo-system
spec:
installationNamespace: istio-system
mesh:
installedMesh:
name: istio
namespace: supergloo-system
istioMesh:
enableAutoInject: true
enableMtls: false
installGrafana: false
installJaeger: false
installPrometheus: true
istioVersion: 1.0.6
```

#### Allow Flagger to manipulate SuperGloo

Create a cluster role binding so that Flagger can manipulate SuperGloo custom resources:

```bash
kubectl create clusterrolebinding flagger-supergloo \
--clusterrole=mesh-discovery \
--serviceaccount=istio-system:flagger
```

Wait for the Istio control plane to become available:

```bash
kubectl --namespace istio-system rollout status deployment/istio-sidecar-injector
kubectl --namespace istio-system rollout status deployment/prometheus
```

### Install Flagger

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set meshProvider=supergloo:istio.supergloo-system
```

When using SuperGloo the mesh provider format is `supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE>`.
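
Parsing that documented format comes down to a prefix check and one split; a minimal sketch, with error wording of my own:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSuperGlooProvider splits a mesh provider value of the form
// supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE> into its parts.
func parseSuperGlooProvider(provider string) (mesh, namespace string, err error) {
	if !strings.HasPrefix(provider, "supergloo:") {
		return "", "", fmt.Errorf("not a supergloo provider: %s", provider)
	}
	parts := strings.SplitN(strings.TrimPrefix(provider, "supergloo:"), ".", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE>, got %s", provider)
	}
	return parts[0], parts[1], nil
}

func main() {
	mesh, ns, err := parseSuperGlooProvider("supergloo:istio.supergloo-system")
	fmt.Println(mesh, ns, err) // istio supergloo-system <nil>
}
```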
Optionally you can enable **Slack** notifications:

```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```

### Install Grafana

Flagger comes with a Grafana dashboard made for monitoring the canary analysis.

Deploy Grafana in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```

You can access Grafana using port forwarding:

```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```

### Install Load Tester

Flagger comes with an optional load testing service that generates traffic
during canary analysis when configured as a webhook.

Deploy the load test runner with Helm:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.timeout=1h
```

Deploy with kubectl:

```bash
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
--name flagger-loadtester \
--namespace=test
> $HOME/flagger-loadtester.yaml

# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```

> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.

@@ -60,6 +60,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.example.com
@@ -79,12 +80,12 @@ spec:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500

@@ -85,7 +85,7 @@ spec:
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: envoy_cluster_upstream_rq
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99

@@ -46,6 +46,9 @@ Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
the destination weight values:

```bash
# Flagger version and mesh provider gauge
flagger_info{version="0.10.0", mesh_provider="istio"} 1

# Canaries total gauge
flagger_canary_total{namespace="test"} 1
```
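
Gauges like these can be exposed with the Prometheus Go client. A minimal sketch, assuming the metric names above but hypothetical help texts and listen port:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// gauge carrying the version and mesh provider as labels
	info := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "flagger_info",
		Help: "Flagger version and mesh provider.",
	}, []string{"version", "mesh_provider"})

	// gauge counting canary objects per namespace
	canaries := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "flagger_canary_total",
		Help: "Total number of canary objects per namespace.",
	}, []string{"namespace"})

	prometheus.MustRegister(info, canaries)
	info.WithLabelValues("0.13.2", "istio").Set(1)
	canaries.WithLabelValues("test").Set(1)

	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```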

docs/gitbook/usage/nginx-progressive-delivery.md (new file, 421 lines)
@@ -0,0 +1,421 @@

# NGINX Ingress Controller Canary Deployments

This guide shows you how to use the NGINX ingress controller and Flagger to automate canary deployments and A/B testing.



### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer and NGINX ingress **0.24** or newer.

Install NGINX with Helm:

```bash
helm upgrade -i nginx-ingress stable/nginx-ingress \
--namespace ingress-nginx \
--set controller.stats.enabled=true \
--set controller.metrics.enabled=true \
--set controller.podAnnotations."prometheus\.io/scrape"=true \
--set controller.podAnnotations."prometheus\.io/port"=10254
```

Install Flagger and the Prometheus add-on in the same namespace as NGINX:

```bash
helm repo add flagger https://flagger.app

helm upgrade -i flagger flagger/flagger \
--namespace ingress-nginx \
--set prometheus.install=true \
--set meshProvider=nginx
```

Optionally you can enable Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace ingress-nginx \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```

### Bootstrap

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and a canary ingress).
These objects expose the application outside the cluster and drive the canary analysis and promotion.

Create a test namespace:

```bash
kubectl create ns test
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```

Create an ingress definition (replace `app.example.com` with your own domain):

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
    - host: app.example.com
      http:
        paths:
          - backend:
              serviceName: podinfo
              servicePort: 9898
```

Save the above resource as podinfo-ingress.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-ingress.yaml
```

Create a canary custom resource (replace `app.example.com` with your own domain):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # ingress reference
  ingressRef:
    apiVersion: extensions/v1beta1
    kind: Ingress
    name: podinfo
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  service:
    # container port
    port: 9898
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # NGINX Prometheus checks
    metrics:
    - name: request-success-rate
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
    # load testing (optional)
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```

Save the above resource as podinfo-canary.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
ingresses.extensions/podinfo-canary
```

### Automated canary promotion

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like the HTTP request success rate, average request duration and pod health.
Based on an analysis of the KPIs, a canary is promoted or aborted, and the analysis result is published to Slack.
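
To make the shape of that loop concrete, here is a deliberately simplified Go sketch of the promote-or-abort decision just described; the function name and parameters are illustrative only and do not mirror Flagger's internals:

```go
package main

import "fmt"

// advanceCanary is a toy version of the control loop: on every interval it
// checks the KPIs, then either advances the canary weight by stepWeight,
// counts a failed check, or rolls back once the failure threshold is reached.
func advanceCanary(kpisHealthy func() bool, stepWeight, maxWeight, threshold int) bool {
    weight, failed := 0, 0
    for {
        if !kpisHealthy() {
            failed++
            if failed >= threshold {
                return false // rollback: route all traffic back to the primary
            }
            continue // halt advancement and retry on the next interval
        }
        weight += stepWeight
        if weight >= maxWeight {
            return true // promote: copy the canary spec over the primary
        }
        // a real controller would update the canary ingress weight here
    }
}

func main() {
    promoted := advanceCanary(func() bool { return true }, 5, 50, 10)
    fmt.Println("promoted:", promoted) // promoted: true
}
```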



Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME       STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo    Progressing   15       2019-05-06T14:05:07Z
prod        frontend   Succeeded     0        2019-05-05T16:15:07Z
prod        backend    Failed        0        2019-05-04T17:05:07Z
```

### Automated rollback

During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulty version.

Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```

Generate HTTP 500 errors:

```bash
watch curl http://app.example.com/status/500
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

### Custom metrics

The canary analysis can be extended with Prometheus queries.

The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
histogram to validate the canary.

Edit the canary analysis and add the following metric:

```yaml
canaryAnalysis:
  metrics:
  - name: "latency"
    threshold: 0.5
    interval: 1m
    query: |
      histogram_quantile(0.99,
        sum(
          rate(
            http_request_duration_seconds_bucket{
              kubernetes_namespace="test",
              kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
            }[1m]
          )
        ) by (le)
      )
```

The threshold is set to 500ms, so if the P99 request duration over the last minute
goes over half a second the analysis will fail and the canary will not be promoted.
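
For intuition, a metric check of this kind boils down to running the query against the Prometheus HTTP API and comparing the result to the threshold. The Go sketch below shows that idea under stated assumptions (the Prometheus address in `main` is a guess matching the install above, and the error handling is simplified; Flagger's real observer is more involved):

```go
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
    "strconv"
)

// promResult mirrors just the parts of the Prometheus instant-query
// response that the check needs.
type promResult struct {
    Data struct {
        Result []struct {
            Value [2]interface{} `json:"value"` // [timestamp, "value"]
        } `json:"result"`
    } `json:"data"`
}

// checkLatency runs an instant query and reports whether the returned
// value is within the threshold (both expressed in seconds).
func checkLatency(promURL, query string, threshold float64) (bool, error) {
    resp, err := http.Get(promURL + "/api/v1/query?query=" + url.QueryEscape(query))
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()

    var res promResult
    if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
        return false, err
    }
    if len(res.Data.Result) == 0 {
        return false, fmt.Errorf("no samples returned")
    }
    value, err := strconv.ParseFloat(res.Data.Result[0].Value[1].(string), 64)
    if err != nil {
        return false, err
    }
    return value <= threshold, nil
}

func main() {
    ok, err := checkLatency("http://prometheus.ingress-nginx:9090",
        `histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket[1m])) by (le))`, 0.5)
    fmt.Println(ok, err)
}
```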

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
```

Generate high response latency:

```bash
watch curl http://app.example.com/delay/2
```

Watch Flagger logs:

```text
kubectl -n ingress-nginx logs deployment/flagger -f | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement latency 1.20 > 0.5
Halt podinfo.test advancement latency 1.45 > 0.5
Halt podinfo.test advancement latency 1.60 > 0.5
Halt podinfo.test advancement latency 1.69 > 0.5
Halt podinfo.test advancement latency 1.70 > 0.5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```

If you have Slack configured, Flagger will send a notification with the reason why the canary failed.

### A/B Testing

Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.



Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:

```yaml
canaryAnalysis:
  interval: 1m
  threshold: 10
  iterations: 10
  match:
    # curl -H 'X-Canary: insider' http://app.example.com
    - headers:
        x-canary:
          exact: "insider"
    # curl -b 'canary=always' http://app.example.com
    - headers:
        cookie:
          exact: "canary"
  metrics:
  - name: request-success-rate
    threshold: 99
    interval: 1m
  webhooks:
    - name: load-test
      url: http://localhost:8888/
      timeout: 5s
      metadata:
        type: cmd
        cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
        logCmdOutput: "true"
```

The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
those that call the service using the `X-Canary: insider` header.
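
The match semantics are the same ones you would express by hand: a request belongs to the canary cohort when the header or the cookie matches. A minimal Go sketch of that predicate, assuming the two conditions above (it is not the code NGINX or Flagger actually runs):

```go
package main

import (
    "fmt"
    "net/http"
)

// matchesCanary mirrors the match conditions above: route to the canary when
// the X-Canary header is exactly "insider" or the "canary" cookie is "always".
func matchesCanary(r *http.Request) bool {
    if r.Header.Get("X-Canary") == "insider" {
        return true
    }
    if c, err := r.Cookie("canary"); err == nil && c.Value == "always" {
        return true
    }
    return false
}

func main() {
    req, _ := http.NewRequest("GET", "http://app.example.com/", nil)
    req.Header.Set("X-Canary", "insider")
    fmt.Println(matchesCanary(req)) // true
}
```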

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.5.0
```

Flagger detects that the deployment revision changed and starts the A/B testing:

```text
kubectl -n test describe canary/podinfo

Status:
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 1/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 2/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 3/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 4/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 5/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 6/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 7/10
  Normal   Synced  55s   flagger  Advance podinfo.test canary iteration 8/10
  Normal   Synced  45s   flagger  Advance podinfo.test canary iteration 9/10
  Normal   Synced  35s   flagger  Advance podinfo.test canary iteration 10/10
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

@@ -54,6 +54,7 @@ spec:
     # Istio gateways (optional)
     gateways:
     - public-gateway.istio-system.svc.cluster.local
     - mesh
     # Istio virtual service host names (optional)
     hosts:
     - app.example.com
@@ -69,12 +70,12 @@ spec:
       # percentage (0-100)
       stepWeight: 10
     metrics:
-    - name: istio_requests_total
+    - name: request-success-rate
       # minimum req success rate (non 5xx responses)
       # percentage (0-100)
       threshold: 99
       interval: 1m
-    - name: istio_request_duration_seconds_bucket
+    - name: request-duration
       # maximum req duration P99
       # milliseconds
       threshold: 500
@@ -52,6 +52,10 @@ type CanarySpec struct {
 	// +optional
 	AutoscalerRef *hpav1.CrossVersionObjectReference `json:"autoscalerRef,omitempty"`

+	// reference to NGINX ingress resource
+	// +optional
+	IngressRef *hpav1.CrossVersionObjectReference `json:"ingressRef,omitempty"`
+
 	// virtual service spec
 	Service CanaryService `json:"service"`

@@ -148,11 +152,24 @@ type CanaryMetric struct {
 	Query string `json:"query,omitempty"`
 }

+// HookType can be pre, post or during rollout
+type HookType string
+
+const (
+	// RolloutHook executes the webhook during the canary analysis
+	RolloutHook HookType = "rollout"
+	// PreRolloutHook executes the webhook before routing traffic to the canary
+	PreRolloutHook HookType = "pre-rollout"
+	// PostRolloutHook executes the webhook after the canary analysis
+	PostRolloutHook HookType = "post-rollout"
+)
+
 // CanaryWebhook holds the reference to external checks used for canary analysis
 type CanaryWebhook struct {
-	Name    string `json:"name"`
-	URL     string `json:"url"`
-	Timeout string `json:"timeout"`
+	Type    HookType `json:"type"`
+	Name    string   `json:"name"`
+	URL     string   `json:"url"`
+	Timeout string   `json:"timeout"`
 	// +optional
 	Metadata *map[string]string `json:"metadata,omitempty"`
 }
@@ -161,6 +178,7 @@ type CanaryWebhook struct {
 type CanaryWebhookPayload struct {
 	Name      string            `json:"name"`
 	Namespace string            `json:"namespace"`
+	Phase     CanaryPhase       `json:"phase"`
 	Metadata  map[string]string `json:"metadata,omitempty"`
 }
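
Given the payload type above, the JSON body a webhook receiver gets is straightforward to picture. A self-contained sketch (the struct is mirrored locally so the snippet compiles on its own, and the field values are illustrative):

```go
package main

import (
    "encoding/json"
    "fmt"
)

type CanaryPhase string

// CanaryWebhookPayload mirrors the type defined above.
type CanaryWebhookPayload struct {
    Name      string            `json:"name"`
    Namespace string            `json:"namespace"`
    Phase     CanaryPhase       `json:"phase"`
    Metadata  map[string]string `json:"metadata,omitempty"`
}

func main() {
    payload := CanaryWebhookPayload{
        Name:      "podinfo",
        Namespace: "test",
        Phase:     "Progressing",
        Metadata:  map[string]string{"type": "cmd", "cmd": "hey -z 1m -q 10 -c 2 http://app.example.com/"},
    }
    body, _ := json.MarshalIndent(payload, "", "  ")
    fmt.Println(string(body))
}
```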

@@ -205,6 +205,11 @@ func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
 		*out = new(v1.CrossVersionObjectReference)
 		**out = **in
 	}
+	if in.IngressRef != nil {
+		in, out := &in.IngressRef, &out.IngressRef
+		*out = new(v1.CrossVersionObjectReference)
+		**out = **in
+	}
 	in.Service.DeepCopyInto(&out.Service)
 	in.CanaryAnalysis.DeepCopyInto(&out.CanaryAnalysis)
 	if in.ProgressDeadlineSeconds != nil {

pkg/canary/deployer.go (new file, 356 lines)
@@ -0,0 +1,356 @@
package canary

import (
    "crypto/rand"
    "encoding/base64"
    "encoding/json"
    "fmt"
    "io"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    "go.uber.org/zap"
    appsv1 "k8s.io/api/apps/v1"
    hpav1 "k8s.io/api/autoscaling/v2beta1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes"
)

// Deployer manages the operations for the Kubernetes Deployment kind
type Deployer struct {
    KubeClient    kubernetes.Interface
    FlaggerClient clientset.Interface
    Logger        *zap.SugaredLogger
    ConfigTracker ConfigTracker
    Labels        []string
}

// Initialize creates the primary deployment and HPA, scales the canary
// deployment to zero and returns the pod selector label
func (c *Deployer) Initialize(cd *flaggerv1.Canary) (string, error) {
    primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
    label, err := c.createPrimaryDeployment(cd)
    if err != nil {
        return "", fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
    }

    if cd.Status.Phase == "" {
        c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
        if err := c.Scale(cd, 0); err != nil {
            return "", err
        }
    }

    if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
        if err := c.createPrimaryHpa(cd); err != nil {
            return "", fmt.Errorf("creating hpa %s.%s failed: %v", primaryName, cd.Namespace, err)
        }
    }
    return label, nil
}

// Promote copies the pod spec, secrets and config maps from canary to primary
func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
    targetName := cd.Spec.TargetRef.Name
    primaryName := fmt.Sprintf("%s-primary", targetName)

    canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
        }
        return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
    }

    label, err := c.getSelectorLabel(canary)
    if err != nil {
        return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
            targetName, cd.Namespace, targetName)
    }

    primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
        }
        return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
    }

    // promote secrets and config maps
    configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
    if err != nil {
        return err
    }
    if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
        return err
    }

    primaryCopy := primary.DeepCopy()
    primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
    primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
    primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
    primaryCopy.Spec.Strategy = canary.Spec.Strategy

    // update spec with primary secrets and config maps
    primaryCopy.Spec.Template.Spec = c.ConfigTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)

    // update pod annotations to ensure a rolling update
    annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
    if err != nil {
        return err
    }
    primaryCopy.Spec.Template.Annotations = annotations

    primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)

    _, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
    if err != nil {
        return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
            primaryCopy.GetName(), primaryCopy.Namespace, err)
    }

    return nil
}

// HasDeploymentChanged returns true if the canary deployment pod spec has changed
func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
    targetName := cd.Spec.TargetRef.Name
    canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
        }
        return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
    }

    if cd.Status.LastAppliedSpec == "" {
        return true, nil
    }

    newSpec := &canary.Spec.Template.Spec
    oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
    if err != nil {
        return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
    }
    oldSpec := &corev1.PodSpec{}
    err = json.Unmarshal(oldSpecJson, oldSpec)
    if err != nil {
        return false, fmt.Errorf("%s.%s unmarshal error %v", cd.Name, cd.Namespace, err)
    }

    if diff := cmp.Diff(*newSpec, *oldSpec, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" {
        //fmt.Println(diff)
        return true, nil
    }

    return false, nil
}

// Scale sets the canary deployment replicas
func (c *Deployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
    targetName := cd.Spec.TargetRef.Name
    dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
        }
        return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
    }

    depCopy := dep.DeepCopy()
    depCopy.Spec.Replicas = int32p(replicas)

    _, err = c.KubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
    if err != nil {
        return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
    }
    return nil
}

func (c *Deployer) createPrimaryDeployment(cd *flaggerv1.Canary) (string, error) {
    targetName := cd.Spec.TargetRef.Name
    primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)

    canaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return "", fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
        }
        return "", err
    }

    label, err := c.getSelectorLabel(canaryDep)
    if err != nil {
        return "", fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
            targetName, cd.Namespace, targetName)
    }

    primaryDep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
    if errors.IsNotFound(err) {
        // create primary secrets and config maps
        configRefs, err := c.ConfigTracker.GetTargetConfigs(cd)
        if err != nil {
            return "", err
        }
        if err := c.ConfigTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
            return "", err
        }
        annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
        if err != nil {
            return "", err
        }

        replicas := int32(1)
        if canaryDep.Spec.Replicas != nil && *canaryDep.Spec.Replicas > 0 {
            replicas = *canaryDep.Spec.Replicas
        }

        // create primary deployment
        primaryDep = &appsv1.Deployment{
            ObjectMeta: metav1.ObjectMeta{
                Name:      primaryName,
                Namespace: cd.Namespace,
                OwnerReferences: []metav1.OwnerReference{
                    *metav1.NewControllerRef(cd, schema.GroupVersionKind{
                        Group:   flaggerv1.SchemeGroupVersion.Group,
                        Version: flaggerv1.SchemeGroupVersion.Version,
                        Kind:    flaggerv1.CanaryKind,
                    }),
                },
            },
            Spec: appsv1.DeploymentSpec{
                ProgressDeadlineSeconds: canaryDep.Spec.ProgressDeadlineSeconds,
                MinReadySeconds:         canaryDep.Spec.MinReadySeconds,
                RevisionHistoryLimit:    canaryDep.Spec.RevisionHistoryLimit,
                Replicas:                int32p(replicas),
                Strategy:                canaryDep.Spec.Strategy,
                Selector: &metav1.LabelSelector{
                    MatchLabels: map[string]string{
                        label: primaryName,
                    },
                },
                Template: corev1.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{
                        Labels:      makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryName, label),
                        Annotations: annotations,
                    },
                    // update spec with the primary secrets and config maps
                    Spec: c.ConfigTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
                },
            },
        }

        _, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
        if err != nil {
            return "", err
        }

        c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
    }

    return label, nil
}

func (c *Deployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
    primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
    hpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return fmt.Errorf("HorizontalPodAutoscaler %s.%s not found, retrying",
                cd.Spec.AutoscalerRef.Name, cd.Namespace)
        }
        return err
    }
    primaryHpaName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
    primaryHpa, err := c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})

    if errors.IsNotFound(err) {
        primaryHpa = &hpav1.HorizontalPodAutoscaler{
            ObjectMeta: metav1.ObjectMeta{
                Name:      primaryHpaName,
                Namespace: cd.Namespace,
                Labels:    hpa.Labels,
                OwnerReferences: []metav1.OwnerReference{
                    *metav1.NewControllerRef(cd, schema.GroupVersionKind{
                        Group:   flaggerv1.SchemeGroupVersion.Group,
                        Version: flaggerv1.SchemeGroupVersion.Version,
                        Kind:    flaggerv1.CanaryKind,
                    }),
                },
            },
            Spec: hpav1.HorizontalPodAutoscalerSpec{
                ScaleTargetRef: hpav1.CrossVersionObjectReference{
                    Name:       primaryName,
                    Kind:       hpa.Spec.ScaleTargetRef.Kind,
                    APIVersion: hpa.Spec.ScaleTargetRef.APIVersion,
                },
                MinReplicas: hpa.Spec.MinReplicas,
                MaxReplicas: hpa.Spec.MaxReplicas,
                Metrics:     hpa.Spec.Metrics,
            },
        }

        _, err = c.KubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
        if err != nil {
            return err
        }
        c.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
    }

    return nil
}

// makeAnnotations appends a unique ID to the annotations map
func (c *Deployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
    idKey := "flagger-id"
    res := make(map[string]string)
    uuid := make([]byte, 16)
    n, err := io.ReadFull(rand.Reader, uuid)
    if n != len(uuid) || err != nil {
        return res, err
    }
    // set the RFC 4122 variant and version 4 (random) bits
    uuid[8] = uuid[8]&^0xc0 | 0x80
    uuid[6] = uuid[6]&^0xf0 | 0x40
    id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])

    for k, v := range annotations {
        if k != idKey {
            res[k] = v
        }
    }
    res[idKey] = id

    return res, nil
}

// getSelectorLabel returns the selector match label
func (c *Deployer) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
    for _, l := range c.Labels {
        if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok {
            return l, nil
        }
    }

    return "", fmt.Errorf("selector not found")
}

func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string {
    res := make(map[string]string)
    for k, v := range labels {
        if k != label {
            res[k] = v
        }
    }
    res[label] = primaryName

    return res
}

func int32p(i int32) *int32 {
    return &i
}
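
For orientation, this is roughly how a `Deployer` could be wired together with real clientsets; the kubeconfig path and the standalone `main` are assumptions for illustration, not code from this repository:

```go
package main

import (
    "log"

    "github.com/weaveworks/flagger/pkg/canary"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    "github.com/weaveworks/flagger/pkg/logger"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        log.Fatal(err)
    }
    kubeClient := kubernetes.NewForConfigOrDie(cfg)
    flaggerClient := clientset.NewForConfigOrDie(cfg)
    zlog, _ := logger.NewLogger("debug")

    deployer := canary.Deployer{
        KubeClient:    kubeClient,
        FlaggerClient: flaggerClient,
        Logger:        zlog,
        Labels:        []string{"app", "name"},
        ConfigTracker: canary.ConfigTracker{
            KubeClient:    kubeClient,
            FlaggerClient: flaggerClient,
            Logger:        zlog,
        },
    }
    _ = deployer // the controller calls Initialize, Promote and Scale on this
}
```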

@@ -1,4 +1,4 @@
-package controller
+package canary

 import (
 	"testing"
@@ -8,8 +8,8 @@ import (
 )

 func TestCanaryDeployer_Sync(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -94,8 +94,8 @@ func TestCanaryDeployer_Sync(t *testing.T) {
 }

 func TestCanaryDeployer_IsNewSpec(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -106,7 +106,7 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
 		t.Fatal(err.Error())
 	}

-	isNew, err := mocks.deployer.IsNewSpec(mocks.canary)
+	isNew, err := mocks.deployer.HasDeploymentChanged(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -117,8 +117,8 @@ func TestCanaryDeployer_IsNewSpec(t *testing.T) {
 }

 func TestCanaryDeployer_Promote(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -162,8 +162,8 @@ func TestCanaryDeployer_Promote(t *testing.T) {
 }

 func TestCanaryDeployer_IsReady(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Error("Expected primary readiness check to fail")
 	}
@@ -180,8 +180,8 @@ func TestCanaryDeployer_IsReady(t *testing.T) {
 }

 func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -202,8 +202,8 @@ func TestCanaryDeployer_SetFailedChecks(t *testing.T) {
 }

 func TestCanaryDeployer_SetState(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -224,8 +224,8 @@ func TestCanaryDeployer_SetState(t *testing.T) {
 }

 func TestCanaryDeployer_SyncStatus(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -263,8 +263,8 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
 }

 func TestCanaryDeployer_Scale(t *testing.T) {
-	mocks := SetupMocks(false)
-	err := mocks.deployer.Sync(mocks.canary)
+	mocks := SetupMocks()
+	_, err := mocks.deployer.Initialize(mocks.canary)
 	if err != nil {
 		t.Fatal(err.Error())
 	}
@@ -279,5 +279,4 @@ func TestCanaryDeployer_Scale(t *testing.T) {
 	if *c.Spec.Replicas != 2 {
 		t.Errorf("Got replicas %v wanted %v", *c.Spec.Replicas, 2)
 	}
-
 }

pkg/canary/mock.go (new file, 471 lines)
@@ -0,0 +1,471 @@
package canary

import (
    "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
    fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
    "github.com/weaveworks/flagger/pkg/logger"
    "go.uber.org/zap"
    appsv1 "k8s.io/api/apps/v1"
    hpav1 "k8s.io/api/autoscaling/v1"
    hpav2 "k8s.io/api/autoscaling/v2beta1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/fake"
)

type Mocks struct {
    canary        *v1alpha3.Canary
    kubeClient    kubernetes.Interface
    flaggerClient clientset.Interface
    deployer      Deployer
    logger        *zap.SugaredLogger
}

func SetupMocks() Mocks {
    // init canary
    canary := newTestCanary()
    flaggerClient := fakeFlagger.NewSimpleClientset(canary)

    // init kube clientset and register mock objects
    kubeClient := fake.NewSimpleClientset(
        newTestDeployment(),
        newTestHPA(),
        NewTestConfigMap(),
        NewTestConfigMapEnv(),
        NewTestConfigMapVol(),
        NewTestSecret(),
        NewTestSecretEnv(),
        NewTestSecretVol(),
    )

    logger, _ := logger.NewLogger("debug")

    deployer := Deployer{
        FlaggerClient: flaggerClient,
        KubeClient:    kubeClient,
        Logger:        logger,
        Labels:        []string{"app", "name"},
        ConfigTracker: ConfigTracker{
            Logger:        logger,
            KubeClient:    kubeClient,
            FlaggerClient: flaggerClient,
        },
    }

    return Mocks{
        canary:        canary,
        deployer:      deployer,
        logger:        logger,
        flaggerClient: flaggerClient,
        kubeClient:    kubeClient,
    }
}

func NewTestConfigMap() *corev1.ConfigMap {
    return &corev1.ConfigMap{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-config-env",
        },
        Data: map[string]string{
            "color": "red",
        },
    }
}

func NewTestConfigMapV2() *corev1.ConfigMap {
    return &corev1.ConfigMap{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-config-env",
        },
        Data: map[string]string{
            "color":  "blue",
            "output": "console",
        },
    }
}

func NewTestConfigMapEnv() *corev1.ConfigMap {
    return &corev1.ConfigMap{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-config-all-env",
        },
        Data: map[string]string{
            "color": "red",
        },
    }
}

func NewTestConfigMapVol() *corev1.ConfigMap {
    return &corev1.ConfigMap{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-config-vol",
        },
        Data: map[string]string{
            "color": "red",
        },
    }
}

func NewTestSecret() *corev1.Secret {
    return &corev1.Secret{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-secret-env",
        },
        Type: corev1.SecretTypeOpaque,
        Data: map[string][]byte{
            "apiKey": []byte("test"),
        },
    }
}

func NewTestSecretV2() *corev1.Secret {
    return &corev1.Secret{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-secret-env",
        },
        Type: corev1.SecretTypeOpaque,
        Data: map[string][]byte{
            "apiKey": []byte("test2"),
        },
    }
}

func NewTestSecretEnv() *corev1.Secret {
    return &corev1.Secret{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-secret-all-env",
        },
        Type: corev1.SecretTypeOpaque,
        Data: map[string][]byte{
            "apiKey": []byte("test"),
        },
    }
}

func NewTestSecretVol() *corev1.Secret {
    return &corev1.Secret{
        TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo-secret-vol",
        },
        Type: corev1.SecretTypeOpaque,
        Data: map[string][]byte{
            "apiKey": []byte("test"),
        },
    }
}

func newTestCanary() *v1alpha3.Canary {
    cd := &v1alpha3.Canary{
        TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo",
        },
        Spec: v1alpha3.CanarySpec{
            TargetRef: hpav1.CrossVersionObjectReference{
                Name:       "podinfo",
                APIVersion: "apps/v1",
                Kind:       "Deployment",
            },
            AutoscalerRef: &hpav1.CrossVersionObjectReference{
                Name:       "podinfo",
                APIVersion: "autoscaling/v2beta1",
                Kind:       "HorizontalPodAutoscaler",
            },
            Service: v1alpha3.CanaryService{
                Port: 9898,
            },
            CanaryAnalysis: v1alpha3.CanaryAnalysis{
                Threshold:  10,
                StepWeight: 10,
                MaxWeight:  50,
                Metrics: []v1alpha3.CanaryMetric{
                    {
                        Name:      "istio_requests_total",
                        Threshold: 99,
                        Interval:  "1m",
                    },
                    {
                        Name:      "istio_request_duration_seconds_bucket",
                        Threshold: 500,
                        Interval:  "1m",
                    },
                },
            },
        },
    }
    return cd
}

func newTestDeployment() *appsv1.Deployment {
    d := &appsv1.Deployment{
        TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo",
        },
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "name": "podinfo",
                },
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{
                        "name": "podinfo",
                    },
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{
                        {
                            Name:  "podinfo",
                            Image: "quay.io/stefanprodan/podinfo:1.2.0",
                            Command: []string{
                                "./podinfo",
                                "--port=9898",
                            },
                            Args:       nil,
                            WorkingDir: "",
                            Ports: []corev1.ContainerPort{
                                {
                                    Name:          "http",
                                    ContainerPort: 9898,
                                    Protocol:      corev1.ProtocolTCP,
                                },
                            },
                            Env: []corev1.EnvVar{
                                {
                                    Name: "PODINFO_UI_COLOR",
                                    ValueFrom: &corev1.EnvVarSource{
                                        ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
                                            LocalObjectReference: corev1.LocalObjectReference{
                                                Name: "podinfo-config-env",
                                            },
                                            Key: "color",
                                        },
                                    },
                                },
                                {
                                    Name: "API_KEY",
                                    ValueFrom: &corev1.EnvVarSource{
                                        SecretKeyRef: &corev1.SecretKeySelector{
                                            LocalObjectReference: corev1.LocalObjectReference{
                                                Name: "podinfo-secret-env",
                                            },
                                            Key: "apiKey",
                                        },
                                    },
                                },
                            },
                            EnvFrom: []corev1.EnvFromSource{
                                {
                                    ConfigMapRef: &corev1.ConfigMapEnvSource{
                                        LocalObjectReference: corev1.LocalObjectReference{
                                            Name: "podinfo-config-all-env",
                                        },
                                    },
                                },
                                {
                                    SecretRef: &corev1.SecretEnvSource{
                                        LocalObjectReference: corev1.LocalObjectReference{
                                            Name: "podinfo-secret-all-env",
                                        },
                                    },
                                },
                            },
                            VolumeMounts: []corev1.VolumeMount{
                                {
                                    Name:      "config",
                                    MountPath: "/etc/podinfo/config",
                                    ReadOnly:  true,
                                },
                                {
                                    Name:      "secret",
                                    MountPath: "/etc/podinfo/secret",
                                    ReadOnly:  true,
                                },
                            },
                        },
                    },
                    Volumes: []corev1.Volume{
                        {
                            Name: "config",
                            VolumeSource: corev1.VolumeSource{
                                ConfigMap: &corev1.ConfigMapVolumeSource{
                                    LocalObjectReference: corev1.LocalObjectReference{
                                        Name: "podinfo-config-vol",
                                    },
                                },
                            },
                        },
                        {
                            Name: "secret",
                            VolumeSource: corev1.VolumeSource{
                                Secret: &corev1.SecretVolumeSource{
                                    SecretName: "podinfo-secret-vol",
                                },
                            },
                        },
                    },
                },
            },
        },
    }

    return d
}

func newTestDeploymentV2() *appsv1.Deployment {
    d := &appsv1.Deployment{
        TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo",
        },
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "name": "podinfo",
                },
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{
                        "name": "podinfo",
                    },
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{
                        {
                            Name:  "podinfo",
                            Image: "quay.io/stefanprodan/podinfo:1.2.1",
                            Ports: []corev1.ContainerPort{
                                {
                                    Name:          "http",
                                    ContainerPort: 9898,
                                    Protocol:      corev1.ProtocolTCP,
                                },
                            },
                            Command: []string{
                                "./podinfo",
                                "--port=9898",
                            },
                            Env: []corev1.EnvVar{
                                {
                                    Name: "PODINFO_UI_COLOR",
                                    ValueFrom: &corev1.EnvVarSource{
                                        ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
                                            LocalObjectReference: corev1.LocalObjectReference{
                                                Name: "podinfo-config-env",
                                            },
                                            Key: "color",
                                        },
                                    },
                                },
                                {
                                    Name: "API_KEY",
                                    ValueFrom: &corev1.EnvVarSource{
                                        SecretKeyRef: &corev1.SecretKeySelector{
                                            LocalObjectReference: corev1.LocalObjectReference{
                                                Name: "podinfo-secret-env",
                                            },
                                            Key: "apiKey",
                                        },
                                    },
                                },
                            },
                            EnvFrom: []corev1.EnvFromSource{
                                {
                                    ConfigMapRef: &corev1.ConfigMapEnvSource{
                                        LocalObjectReference: corev1.LocalObjectReference{
                                            Name: "podinfo-config-all-env",
                                        },
                                    },
                                },
                            },
                            VolumeMounts: []corev1.VolumeMount{
                                {
                                    Name:      "config",
                                    MountPath: "/etc/podinfo/config",
                                    ReadOnly:  true,
                                },
                                {
                                    Name:      "secret",
                                    MountPath: "/etc/podinfo/secret",
                                    ReadOnly:  true,
                                },
                            },
                        },
                    },
                    Volumes: []corev1.Volume{
                        {
                            Name: "config",
                            VolumeSource: corev1.VolumeSource{
                                ConfigMap: &corev1.ConfigMapVolumeSource{
                                    LocalObjectReference: corev1.LocalObjectReference{
                                        Name: "podinfo-config-vol",
                                    },
                                },
                            },
                        },
                        {
                            Name: "secret",
                            VolumeSource: corev1.VolumeSource{
                                Secret: &corev1.SecretVolumeSource{
                                    SecretName: "podinfo-secret-vol",
                                },
                            },
                        },
                    },
                },
            },
        },
    }

    return d
}

func newTestHPA() *hpav2.HorizontalPodAutoscaler {
    h := &hpav2.HorizontalPodAutoscaler{
        TypeMeta: metav1.TypeMeta{APIVersion: hpav2.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "podinfo",
        },
        Spec: hpav2.HorizontalPodAutoscalerSpec{
            ScaleTargetRef: hpav2.CrossVersionObjectReference{
                Name:       "podinfo",
                APIVersion: "apps/v1",
                Kind:       "Deployment",
            },
            Metrics: []hpav2.MetricSpec{
                {
                    Type: "Resource",
                    Resource: &hpav2.ResourceMetricSource{
                        Name:                     "cpu",
                        TargetAverageUtilization: int32p(99),
                    },
                },
            },
        },
    }

    return h
}

pkg/canary/ready.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package canary

import (
    "fmt"
    "time"

    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// IsPrimaryReady checks the primary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy.
// It returns a non-retriable error if the rolling update is stuck.
func (c *Deployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
    primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
    primary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
        }
        return true, fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
    }

    retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
    if err != nil {
        return retriable, fmt.Errorf("Halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error())
    }

    if primary.Spec.Replicas != nil && *primary.Spec.Replicas == 0 {
        return true, fmt.Errorf("Halt %s.%s advancement primary deployment is scaled to zero",
            cd.Name, cd.Namespace)
    }
    return true, nil
}

// IsCanaryReady checks the canary deployment status and returns an error if
// the deployment is in the middle of a rolling update or if the pods are unhealthy.
// It returns a non-retriable error if the rolling update is stuck.
func (c *Deployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
    targetName := cd.Spec.TargetRef.Name
    canary, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
        }
        return true, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
    }

    retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
    if err != nil {
        if retriable {
            return retriable, fmt.Errorf("Halt advancement %s.%s %s", targetName, cd.Namespace, err.Error())
        } else {
            return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
                cd.GetProgressDeadlineSeconds())
        }
    }

    return true, nil
}

// isDeploymentReady determines if a deployment is ready by checking the status conditions;
// if a deployment has exceeded the progress deadline it returns a non-retriable error
func (c *Deployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
    retriable := true
    if deployment.Generation <= deployment.Status.ObservedGeneration {
        progress := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
        if progress != nil {
            // Determine if the deployment is stuck by checking if there is a minimum replicas unavailable condition
            // and if the last update time exceeds the deadline
            available := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable)
            if available != nil && available.Status == "False" && available.Reason == "MinimumReplicasUnavailable" {
                from := available.LastUpdateTime
                delta := time.Duration(deadline) * time.Second
                retriable = !from.Add(delta).Before(time.Now())
            }
        }

        if progress != nil && progress.Reason == "ProgressDeadlineExceeded" {
            return false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.GetName())
        } else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
            return retriable, fmt.Errorf("waiting for rollout to finish: %d out of %d new replicas have been updated",
                deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas)
        } else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
            return retriable, fmt.Errorf("waiting for rollout to finish: %d old replicas are pending termination",
                deployment.Status.Replicas-deployment.Status.UpdatedReplicas)
        } else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
            return retriable, fmt.Errorf("waiting for rollout to finish: %d of %d updated replicas are available",
                deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas)
        }

    } else {
        return true, fmt.Errorf("waiting for rollout to finish: observed deployment generation less than desired generation")
    }

    return true, nil
}

func (c *Deployer) getDeploymentCondition(
    status appsv1.DeploymentStatus,
    conditionType appsv1.DeploymentConditionType,
) *appsv1.DeploymentCondition {
    for i := range status.Conditions {
        c := status.Conditions[i]
        if c.Type == conditionType {
            return &c
        }
    }
    return nil
}

pkg/canary/status.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package canary
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// SyncStatus encodes the canary pod spec and updates the canary status
|
||||
func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
|
||||
dep, err := c.KubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
}
|
||||
|
||||
specJson, err := json.Marshal(dep.Spec.Template.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
}
|
||||
|
||||
configs, err := c.ConfigTracker.GetConfigRefs(cd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("configs query error %v", err)
|
||||
}
|
||||
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Phase = status.Phase
|
||||
cdCopy.Status.CanaryWeight = status.CanaryWeight
|
||||
cdCopy.Status.FailedChecks = status.FailedChecks
|
||||
cdCopy.Status.Iterations = status.Iterations
|
||||
cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
cdCopy.Status.TrackedConfigs = configs
|
||||
|
||||
cd, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusFailedChecks updates the canary failed checks counter
|
||||
func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.FailedChecks = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusWeight updates the canary status weight value
|
||||
func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.CanaryWeight = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusIterations updates the canary status iterations value
|
||||
func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Iterations = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusPhase updates the canary status phase
|
||||
func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Phase = phase
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
if phase != flaggerv1.CanaryProgressing {
|
||||
cdCopy.Status.CanaryWeight = 0
|
||||
cdCopy.Status.Iterations = 0
|
||||
}
|
||||
|
||||
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
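For orientation, a rough sketch of how a reconcile loop might drive these helpers; the deployer value, canary object and logger below are illustrative assumptions, not code from this changeset:

// dep is a wired-up canary.Deployer; cd is a *flaggerv1.Canary from the informer cache.
if err := dep.SyncStatus(cd, flaggerv1.CanaryStatus{
	Phase:        flaggerv1.CanaryProgressing,
	CanaryWeight: 0,
}); err != nil {
	logger.Errorf("status sync failed: %v", err)
}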
@@ -1,4 +1,4 @@
-package controller
+package canary

import (
	"crypto/sha256"
@@ -16,9 +16,9 @@ import (

// ConfigTracker is managing the operations for Kubernetes ConfigMaps and Secrets
type ConfigTracker struct {
-	kubeClient    kubernetes.Interface
-	flaggerClient clientset.Interface
-	logger        *zap.SugaredLogger
+	KubeClient    kubernetes.Interface
+	FlaggerClient clientset.Interface
+	Logger        *zap.SugaredLogger
}

type ConfigRefType string
@@ -50,7 +50,7 @@ func checksum(data interface{}) string {
// getRefFromConfigMap transforms a Kubernetes ConfigMap into a ConfigRef
// and computes the checksum of the ConfigMap data
func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*ConfigRef, error) {
-	config, err := ct.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
+	config, err := ct.KubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
@@ -65,7 +65,7 @@ func (ct *ConfigTracker) getRefFromConfigMap(name string, namespace string) (*Co
// getRefFromConfigMap transforms a Kubernetes Secret into a ConfigRef
// and computes the checksum of the Secret data
func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*ConfigRef, error) {
-	secret, err := ct.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
+	secret, err := ct.KubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
@@ -75,7 +75,7 @@ func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*Confi
		secret.Type != corev1.SecretTypeBasicAuth &&
		secret.Type != corev1.SecretTypeSSHAuth &&
		secret.Type != corev1.SecretTypeTLS {
-		ct.logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type)
+		ct.Logger.Debugf("ignoring secret %s.%s type not supported %v", name, namespace, secret.Type)
		return nil, nil
	}

@@ -91,7 +91,7 @@ func (ct *ConfigTracker) getRefFromSecret(name string, namespace string) (*Confi
func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]ConfigRef, error) {
	res := make(map[string]ConfigRef)
	targetName := cd.Spec.TargetRef.Name
-	targetDep, err := ct.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	targetDep, err := ct.KubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return res, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
@@ -104,7 +104,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	if cmv := volume.ConfigMap; cmv != nil {
		config, err := ct.getRefFromConfigMap(cmv.Name, cd.Namespace)
		if err != nil {
-			ct.logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err)
+			ct.Logger.Errorf("configMap %s.%s query error %v", cmv.Name, cd.Namespace, err)
			continue
		}
		if config != nil {
@@ -115,7 +115,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	if sv := volume.Secret; sv != nil {
		secret, err := ct.getRefFromSecret(sv.SecretName, cd.Namespace)
		if err != nil {
-			ct.logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err)
+			ct.Logger.Errorf("secret %s.%s query error %v", sv.SecretName, cd.Namespace, err)
			continue
		}
		if secret != nil {
@@ -133,7 +133,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	name := env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name
	config, err := ct.getRefFromConfigMap(name, cd.Namespace)
	if err != nil {
-		ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
+		ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
		continue
	}
	if config != nil {
@@ -143,7 +143,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	name := env.ValueFrom.SecretKeyRef.LocalObjectReference.Name
	secret, err := ct.getRefFromSecret(name, cd.Namespace)
	if err != nil {
-		ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
+		ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
		continue
	}
	if secret != nil {
@@ -159,7 +159,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	name := envFrom.ConfigMapRef.LocalObjectReference.Name
	config, err := ct.getRefFromConfigMap(name, cd.Namespace)
	if err != nil {
-		ct.logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
+		ct.Logger.Errorf("configMap %s.%s query error %v", name, cd.Namespace, err)
		continue
	}
	if config != nil {
@@ -169,7 +169,7 @@ func (ct *ConfigTracker) GetTargetConfigs(cd *flaggerv1.Canary) (map[string]Conf
	name := envFrom.SecretRef.LocalObjectReference.Name
	secret, err := ct.getRefFromSecret(name, cd.Namespace)
	if err != nil {
-		ct.logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
+		ct.Logger.Errorf("secret %s.%s query error %v", name, cd.Namespace, err)
		continue
	}
	if secret != nil {
@@ -221,7 +221,7 @@ func (ct *ConfigTracker) HasConfigChanged(cd *flaggerv1.Canary) (bool, error) {

	for _, cfg := range configs {
		if trackedConfigs[cfg.GetName()] != cfg.Checksum {
-			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("%s %s has changed", cfg.Type, cfg.Name)
			return true, nil
		}
@@ -236,7 +236,7 @@ func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[str
	for _, ref := range refs {
		switch ref.Type {
		case ConfigRefMap:
-			config, err := ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
+			config, err := ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
@@ -258,10 +258,10 @@ func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[str
			}

			// update or insert primary ConfigMap
-			_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap)
+			_, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Update(primaryConfigMap)
			if err != nil {
				if errors.IsNotFound(err) {
-					_, err = ct.kubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap)
+					_, err = ct.KubeClient.CoreV1().ConfigMaps(cd.Namespace).Create(primaryConfigMap)
					if err != nil {
						return err
					}
@@ -270,10 +270,10 @@ func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[str
				}
			}

-			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("ConfigMap %s synced", primaryConfigMap.GetName())
		case ConfigRefSecret:
-			secret, err := ct.kubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
+			secret, err := ct.KubeClient.CoreV1().Secrets(cd.Namespace).Get(ref.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
@@ -296,10 +296,10 @@ func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[str
			}

			// update or insert primary Secret
-			_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret)
+			_, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Update(primarySecret)
			if err != nil {
				if errors.IsNotFound(err) {
-					_, err = ct.kubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret)
+					_, err = ct.KubeClient.CoreV1().Secrets(cd.Namespace).Create(primarySecret)
					if err != nil {
						return err
					}
@@ -308,7 +308,7 @@ func (ct *ConfigTracker) CreatePrimaryConfigs(cd *flaggerv1.Canary, refs map[str
				}
			}

-			ct.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
+			ct.Logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).
				Infof("Secret %s synced", primarySecret.GetName())
		}
	}
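As a rough illustration of the now-exported ConfigTracker surface (the wiring below is an assumption based on this diff, not verbatim project code):

tracker := canary.ConfigTracker{
	Logger:        logger,        // *zap.SugaredLogger, assumed wired by the caller
	KubeClient:    kubeClient,    // kubernetes.Interface
	FlaggerClient: flaggerClient, // flagger clientset.Interface
}

// Collect the ConfigMaps/Secrets referenced by the target deployment,
// then mirror them as "-primary" copies before promotion.
refs, err := tracker.GetTargetConfigs(cd)
if err == nil {
	err = tracker.CreatePrimaryConfigs(cd, refs)
}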
@@ -26,15 +26,16 @@ import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)

-func init() {
-	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
-	AddToScheme(scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+	appmeshv1beta1.AddToScheme,
+	flaggerv1alpha3.AddToScheme,
+	networkingv1alpha3.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -47,12 +48,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
-// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
-func AddToScheme(scheme *runtime.Scheme) {
-	appmeshv1beta1.AddToScheme(scheme)
-	flaggerv1alpha3.AddToScheme(scheme)
-	networkingv1alpha3.AddToScheme(scheme)
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(AddToScheme(scheme))
}
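This is the standard registration pattern emitted by newer k8s.io/code-generator releases: a SchemeBuilder is just a slice of registration funcs, and utilruntime.Must turns silent registration failures into a startup panic. A condensed sketch of the idea (names illustrative):

// A runtime.SchemeBuilder is []func(*runtime.Scheme) error.
var builder = runtime.SchemeBuilder{flaggerv1alpha3.AddToScheme}

func init() {
	// Must panics if any AddToScheme func returns an error.
	utilruntime.Must(builder.AddToScheme(scheme))
}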
@@ -26,15 +26,16 @@ import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

-func init() {
-	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
-	AddToScheme(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+	appmeshv1beta1.AddToScheme,
+	flaggerv1alpha3.AddToScheme,
+	networkingv1alpha3.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -47,12 +48,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
-// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
-func AddToScheme(scheme *runtime.Scheme) {
-	appmeshv1beta1.AddToScheme(scheme)
-	flaggerv1alpha3.AddToScheme(scheme)
-	networkingv1alpha3.AddToScheme(scheme)
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(AddToScheme(Scheme))
}
@@ -123,7 +123,7 @@ func (c *FakeMeshes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.
// Patch applies the patch and returns the patched mesh.
func (c *FakeMeshes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Mesh, err error) {
	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(meshesResource, name, data, subresources...), &v1beta1.Mesh{})
+		Invokes(testing.NewRootPatchSubresourceAction(meshesResource, name, pt, data, subresources...), &v1beta1.Mesh{})
	if obj == nil {
		return nil, err
	}

@@ -131,7 +131,7 @@ func (c *FakeVirtualNodes) DeleteCollection(options *v1.DeleteOptions, listOptio
// Patch applies the patch and returns the patched virtualNode.
func (c *FakeVirtualNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VirtualNode, err error) {
	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(virtualnodesResource, c.ns, name, data, subresources...), &v1beta1.VirtualNode{})
+		Invokes(testing.NewPatchSubresourceAction(virtualnodesResource, c.ns, name, pt, data, subresources...), &v1beta1.VirtualNode{})

	if obj == nil {
		return nil, err

@@ -131,7 +131,7 @@ func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOp
// Patch applies the patch and returns the patched virtualService.
func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VirtualService, err error) {
	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1beta1.VirtualService{})
+		Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, pt, data, subresources...), &v1beta1.VirtualService{})

	if obj == nil {
		return nil, err
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
|
||||
scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -73,10 +75,15 @@ func (c *meshes) Get(name string, options v1.GetOptions) (result *v1beta1.Mesh,
|
||||
|
||||
// List takes label and field selectors, and returns the list of Meshes that match those selectors.
|
||||
func (c *meshes) List(opts v1.ListOptions) (result *v1beta1.MeshList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1beta1.MeshList{}
|
||||
err = c.client.Get().
|
||||
Resource("meshes").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
@@ -84,10 +91,15 @@ func (c *meshes) List(opts v1.ListOptions) (result *v1beta1.MeshList, err error)
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested meshes.
|
||||
func (c *meshes) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Resource("meshes").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch()
|
||||
}
|
||||
|
||||
@@ -141,9 +153,14 @@ func (c *meshes) Delete(name string, options *v1.DeleteOptions) error {
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *meshes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOptions.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Resource("meshes").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
|
||||
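The same generated pattern repeats for every resource below: when the caller sets ListOptions.TimeoutSeconds, the generated client now also applies it as a request timeout via Timeout(...). A sketch of the caller side (values illustrative, not from this changeset):

// Hypothetical caller: bound the List call to 30 seconds, both in the
// ListOptions sent to the server and the client-side request timeout.
secs := int64(30)
list, err := client.AppmeshV1beta1().Meshes().
	List(v1.ListOptions{TimeoutSeconds: &secs})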
@@ -19,6 +19,8 @@ limitations under the License.
package v1beta1

import (
+	"time"
+
	v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
	scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *virtualNodes) Get(name string, options v1.GetOptions) (result *v1beta1.

// List takes label and field selectors, and returns the list of VirtualNodes that match those selectors.
func (c *virtualNodes) List(opts v1.ListOptions) (result *v1beta1.VirtualNodeList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	result = &v1beta1.VirtualNodeList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("virtualnodes").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Do().
		Into(result)
	return
@@ -88,11 +95,16 @@ func (c *virtualNodes) List(opts v1.ListOptions) (result *v1beta1.VirtualNodeLis

// Watch returns a watch.Interface that watches the requested virtualNodes.
func (c *virtualNodes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("virtualnodes").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Watch()
}

@@ -150,10 +162,15 @@ func (c *virtualNodes) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("virtualnodes").
		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
		Body(options).
		Do().
		Error()
@@ -19,6 +19,8 @@ limitations under the License.
package v1beta1

import (
+	"time"
+
	v1beta1 "github.com/weaveworks/flagger/pkg/apis/appmesh/v1beta1"
	scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1bet

// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
func (c *virtualServices) List(opts v1.ListOptions) (result *v1beta1.VirtualServiceList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	result = &v1beta1.VirtualServiceList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Do().
		Into(result)
	return
@@ -88,11 +95,16 @@ func (c *virtualServices) List(opts v1.ListOptions) (result *v1beta1.VirtualServ

// Watch returns a watch.Interface that watches the requested virtualServices.
func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Watch()
}

@@ -150,10 +162,15 @@ func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
		Body(options).
		Do().
		Error()
@@ -19,6 +19,8 @@ limitations under the License.
package v1alpha3

import (
+	"time"
+
	v1alpha3 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,11 +78,16 @@ func (c *canaries) Get(name string, options v1.GetOptions) (result *v1alpha3.Can

// List takes label and field selectors, and returns the list of Canaries that match those selectors.
func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	result = &v1alpha3.CanaryList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("canaries").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Do().
		Into(result)
	return
@@ -88,11 +95,16 @@ func (c *canaries) List(opts v1.ListOptions) (result *v1alpha3.CanaryList, err e

// Watch returns a watch.Interface that watches the requested canaries.
func (c *canaries) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("canaries").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Watch()
}

@@ -150,10 +162,15 @@ func (c *canaries) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *canaries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("canaries").
		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
		Body(options).
		Do().
		Error()
@@ -131,7 +131,7 @@ func (c *FakeCanaries) DeleteCollection(options *v1.DeleteOptions, listOptions v
// Patch applies the patch and returns the patched canary.
func (c *FakeCanaries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Canary, err error) {
	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, data, subresources...), &v1alpha3.Canary{})
+		Invokes(testing.NewPatchSubresourceAction(canariesResource, c.ns, name, pt, data, subresources...), &v1alpha3.Canary{})

	if obj == nil {
		return nil, err

@@ -119,7 +119,7 @@ func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOp
// Patch applies the patch and returns the patched virtualService.
func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) {
	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1alpha3.VirtualService{})
+		Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, pt, data, subresources...), &v1alpha3.VirtualService{})

	if obj == nil {
		return nil, err
@@ -19,6 +19,8 @@ limitations under the License.
package v1alpha3

import (
+	"time"
+
	v1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
	scheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,11 +77,16 @@ func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alp

// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	result = &v1alpha3.VirtualServiceList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Do().
		Into(result)
	return
@@ -87,11 +94,16 @@ func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualSer

// Watch returns a watch.Interface that watches the requested virtualServices.
func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
		Watch()
}

@@ -133,10 +145,15 @@ func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
	return c.client.Delete().
		Namespace(c.ns).
		Resource("virtualservices").
		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
		Body(options).
		Do().
		Error()
@@ -27,6 +27,7 @@ import (
	cache "k8s.io/client-go/tools/cache"
)

+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -35,4 +36,5 @@ type SharedInformerFactory interface {
	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
@@ -5,6 +5,10 @@ import (
	"sync"
	"time"

+	"github.com/weaveworks/flagger/pkg/canary"
+	"github.com/weaveworks/flagger/pkg/metrics"
+	"github.com/weaveworks/flagger/pkg/router"
+
	"github.com/google/go-cmp/cmp"
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
@@ -40,10 +44,11 @@ type Controller struct {
	logger        *zap.SugaredLogger
	canaries      *sync.Map
	jobs          map[string]CanaryJob
-	deployer      CanaryDeployer
-	observer      CanaryObserver
-	recorder      CanaryRecorder
+	deployer      canary.Deployer
+	observer      metrics.Observer
+	recorder      metrics.Recorder
	notifier      *notifier.Slack
	routerFactory *router.Factory
+	meshProvider  string
}

@@ -56,8 +61,10 @@ func NewController(
	metricServer string,
	logger *zap.SugaredLogger,
	notifier *notifier.Slack,
	routerFactory *router.Factory,
+	meshProvider string,

	version string,
+	labels []string,
) *Controller {
	logger.Debug("Creating event broadcaster")
	flaggerscheme.AddToScheme(scheme.Scheme)
@@ -69,22 +76,20 @@ func NewController(
	eventRecorder := eventBroadcaster.NewRecorder(
		scheme.Scheme, corev1.EventSource{Component: controllerAgentName})

-	deployer := CanaryDeployer{
-		logger:        logger,
-		kubeClient:    kubeClient,
-		flaggerClient: flaggerClient,
-		configTracker: ConfigTracker{
-			logger:        logger,
-			kubeClient:    kubeClient,
-			flaggerClient: flaggerClient,
+	deployer := canary.Deployer{
+		Logger:        logger,
+		KubeClient:    kubeClient,
+		FlaggerClient: flaggerClient,
+		Labels:        labels,
+		ConfigTracker: canary.ConfigTracker{
+			Logger:        logger,
+			KubeClient:    kubeClient,
+			FlaggerClient: flaggerClient,
		},
	}

-	observer := CanaryObserver{
-		metricsServer: metricServer,
-	}
-
-	recorder := NewCanaryRecorder(true)
+	recorder := metrics.NewRecorder(controllerAgentName, true)
+	recorder.SetInfo(version, meshProvider)

	ctrl := &Controller{
		kubeClient: kubeClient,
@@ -99,9 +104,10 @@ func NewController(
		jobs:          map[string]CanaryJob{},
		flaggerWindow: flaggerWindow,
		deployer:      deployer,
-		observer:      observer,
+		observer:      metrics.NewObserver(metricServer),
		recorder:      recorder,
		notifier:      notifier,
		routerFactory: routerFactory,
+		meshProvider:  meshProvider,
	}

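Putting the refactor together, the controller now pulls its helpers from the new pkg/canary and pkg/metrics packages and learns a mesh provider plus pod label keys at construction time. A hedged sketch of only the new trailing parameters (the elided leading arguments are not asserted here):

// Illustrative only; real values come from the cmd/flagger flags.
ctrl := controller.NewController(
	/* existing clients, informer, window, metrics server,
	   logger, notifier, router factory ... */
	"istio",         // meshProvider: which mesh router to drive
	version,         // reported via recorder.SetInfo(version, meshProvider)
	[]string{"app"}, // labels: selector keys handed to canary.Deployer
)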
@@ -1,13 +1,18 @@
package controller

import (
+	"sync"
+	"time"
+
	"github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
	istiov1alpha1 "github.com/weaveworks/flagger/pkg/apis/istio/common/v1alpha1"
	istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
+	"github.com/weaveworks/flagger/pkg/canary"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
-	"github.com/weaveworks/flagger/pkg/logging"
+	"github.com/weaveworks/flagger/pkg/logger"
+	"github.com/weaveworks/flagger/pkg/metrics"
	"github.com/weaveworks/flagger/pkg/router"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
@@ -19,8 +24,6 @@ import (
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
-	"sync"
-	"time"
)

var (
@@ -33,8 +36,8 @@ type Mocks struct {
	kubeClient    kubernetes.Interface
	meshClient    clientset.Interface
	flaggerClient clientset.Interface
-	deployer      CanaryDeployer
-	observer      CanaryObserver
+	deployer      canary.Deployer
+	observer      metrics.Observer
	ctrl          *Controller
	logger        *zap.SugaredLogger
	router        router.Interface
@@ -42,11 +45,11 @@ type Mocks struct {

func SetupMocks(abtest bool) Mocks {
	// init canary
-	canary := newTestCanary()
+	c := newTestCanary()
	if abtest {
-		canary = newTestCanaryAB()
+		c = newTestCanaryAB()
	}
-	flaggerClient := fakeFlagger.NewSimpleClientset(canary)
+	flaggerClient := fakeFlagger.NewSimpleClientset(c)

	// init kube clientset and register mock objects
	kubeClient := fake.NewSimpleClientset(
@@ -60,27 +63,29 @@ func SetupMocks(abtest bool) Mocks {
		NewTestSecretVol(),
	)

-	logger, _ := logging.NewLogger("debug")
+	logger, _ := logger.NewLogger("debug")

	// init controller helpers
-	deployer := CanaryDeployer{
-		flaggerClient: flaggerClient,
-		kubeClient:    kubeClient,
-		logger:        logger,
-		configTracker: ConfigTracker{
-			logger:        logger,
-			kubeClient:    kubeClient,
-			flaggerClient: flaggerClient,
+	deployer := canary.Deployer{
+		Logger:        logger,
+		KubeClient:    kubeClient,
+		FlaggerClient: flaggerClient,
+		Labels:        []string{"app", "name"},
+		ConfigTracker: canary.ConfigTracker{
+			Logger:        logger,
+			KubeClient:    kubeClient,
+			FlaggerClient: flaggerClient,
		},
	}
-	observer := CanaryObserver{
-		metricsServer: "fake",
-	}
+	observer := metrics.NewObserver("fake")

	// init controller
	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, noResyncPeriodFunc())
	flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

+	// init router
+	rf := router.NewFactory(nil, kubeClient, flaggerClient, logger, flaggerClient)
+
	ctrl := &Controller{
		kubeClient:  kubeClient,
		istioClient: flaggerClient,
@@ -94,16 +99,15 @@ func SetupMocks(abtest bool) Mocks {
		flaggerWindow: time.Second,
		deployer:      deployer,
		observer:      observer,
-		recorder:      NewCanaryRecorder(false),
+		recorder:      metrics.NewRecorder(controllerAgentName, false),
+		routerFactory: rf,
	}
	ctrl.flaggerSynced = alwaysReady

-	// init router
-	rf := router.NewFactory(kubeClient, flaggerClient, logger, flaggerClient)
	meshRouter := rf.MeshRouter("istio")

	return Mocks{
-		canary:   canary,
+		canary:   c,
		observer: observer,
		deployer: deployer,
		logger:   logger,
@@ -1,566 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
|
||||
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
|
||||
"go.uber.org/zap"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
hpav1 "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// CanaryDeployer is managing the operations for Kubernetes deployment kind
|
||||
type CanaryDeployer struct {
|
||||
kubeClient kubernetes.Interface
|
||||
flaggerClient clientset.Interface
|
||||
logger *zap.SugaredLogger
|
||||
configTracker ConfigTracker
|
||||
}
|
||||
|
||||
// Promote copies the pod spec, secrets and config maps from canary to primary
|
||||
func (c *CanaryDeployer) Promote(cd *flaggerv1.Canary) error {
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
primaryName := fmt.Sprintf("%s-primary", targetName)
|
||||
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
// promote secrets and config maps
|
||||
configRefs, err := c.configTracker.GetTargetConfigs(cd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
primaryCopy := primary.DeepCopy()
|
||||
primaryCopy.Spec.ProgressDeadlineSeconds = canary.Spec.ProgressDeadlineSeconds
|
||||
primaryCopy.Spec.MinReadySeconds = canary.Spec.MinReadySeconds
|
||||
primaryCopy.Spec.RevisionHistoryLimit = canary.Spec.RevisionHistoryLimit
|
||||
primaryCopy.Spec.Strategy = canary.Spec.Strategy
|
||||
|
||||
// update spec with primary secrets and config maps
|
||||
primaryCopy.Spec.Template.Spec = c.configTracker.ApplyPrimaryConfigs(canary.Spec.Template.Spec, configRefs)
|
||||
|
||||
// update pod annotations to ensure a rolling update
|
||||
annotations, err := c.makeAnnotations(canary.Spec.Template.Annotations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
primaryCopy.Spec.Template.Annotations = annotations
|
||||
|
||||
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName)
|
||||
|
||||
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
|
||||
primaryCopy.GetName(), primaryCopy.Namespace, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsPrimaryReady checks the primary deployment status and returns an error if
|
||||
// the deployment is in the middle of a rolling update or if the pods are unhealthy
|
||||
// it will return a non retriable error if the rolling update is stuck
|
||||
func (c *CanaryDeployer) IsPrimaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
|
||||
primary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, fmt.Errorf("deployment %s.%s not found", primaryName, cd.Namespace)
|
||||
}
|
||||
return true, fmt.Errorf("deployment %s.%s query error %v", primaryName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
retriable, err := c.isDeploymentReady(primary, cd.GetProgressDeadlineSeconds())
|
||||
if err != nil {
|
||||
return retriable, fmt.Errorf("Halt advancement %s.%s %s", primaryName, cd.Namespace, err.Error())
|
||||
}
|
||||
|
||||
if primary.Spec.Replicas == int32p(0) {
|
||||
return true, fmt.Errorf("Halt %s.%s advancement primary deployment is scaled to zero",
|
||||
cd.Name, cd.Namespace)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// IsCanaryReady checks the primary deployment status and returns an error if
|
||||
// the deployment is in the middle of a rolling update or if the pods are unhealthy
|
||||
// it will return a non retriable error if the rolling update is stuck
|
||||
func (c *CanaryDeployer) IsCanaryReady(cd *flaggerv1.Canary) (bool, error) {
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return true, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
retriable, err := c.isDeploymentReady(canary, cd.GetProgressDeadlineSeconds())
|
||||
if err != nil {
|
||||
if retriable {
|
||||
return retriable, fmt.Errorf("Halt advancement %s.%s %s", targetName, cd.Namespace, err.Error())
|
||||
} else {
|
||||
return retriable, fmt.Errorf("deployment does not have minimum availability for more than %vs",
|
||||
cd.GetProgressDeadlineSeconds())
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// IsNewSpec returns true if the canary deployment pod spec has changed
|
||||
func (c *CanaryDeployer) IsNewSpec(cd *flaggerv1.Canary) (bool, error) {
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
canary, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return false, fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
if cd.Status.LastAppliedSpec == "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
newSpec := &canary.Spec.Template.Spec
|
||||
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
|
||||
}
|
||||
oldSpec := &corev1.PodSpec{}
|
||||
err = json.Unmarshal(oldSpecJson, oldSpec)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("%s.%s unmarshal error %v", cd.Name, cd.Namespace, err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(*newSpec, *oldSpec, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" {
|
||||
//fmt.Println(diff)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ShouldAdvance determines if the canary analysis can proceed
|
||||
func (c *CanaryDeployer) ShouldAdvance(cd *flaggerv1.Canary) (bool, error) {
|
||||
if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
newDep, err := c.IsNewSpec(cd)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newDep {
|
||||
return newDep, nil
|
||||
}
|
||||
|
||||
newCfg, err := c.configTracker.HasConfigChanged(cd)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return newCfg, nil
|
||||
|
||||
}
|
||||
|
||||
// SetStatusFailedChecks updates the canary failed checks counter
|
||||
func (c *CanaryDeployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.FailedChecks = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusWeight updates the canary status weight value
|
||||
func (c *CanaryDeployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.CanaryWeight = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusIterations updates the canary status iterations value
|
||||
func (c *CanaryDeployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Iterations = val
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusWeight updates the canary status weight value
|
||||
func (c *CanaryDeployer) IncrementStatusIterations(cd *flaggerv1.Canary) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Iterations = cdCopy.Status.Iterations + 1
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStatusPhase updates the canary status phase
|
||||
func (c *CanaryDeployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Phase = phase
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
|
||||
if phase != flaggerv1.CanaryProgressing {
|
||||
cdCopy.Status.CanaryWeight = 0
|
||||
cdCopy.Status.Iterations = 0
|
||||
}
|
||||
|
||||
cd, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncStatus encodes the canary pod spec and updates the canary status
|
||||
func (c *CanaryDeployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatus) error {
|
||||
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
}
|
||||
|
||||
specJson, err := json.Marshal(dep.Spec.Template.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
|
||||
}
|
||||
|
||||
configs, err := c.configTracker.GetConfigRefs(cd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("configs query error %v", err)
|
||||
}
|
||||
|
||||
cdCopy := cd.DeepCopy()
|
||||
cdCopy.Status.Phase = status.Phase
|
||||
cdCopy.Status.CanaryWeight = status.CanaryWeight
|
||||
cdCopy.Status.FailedChecks = status.FailedChecks
|
||||
cdCopy.Status.Iterations = status.Iterations
|
||||
cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
|
||||
cdCopy.Status.LastTransitionTime = metav1.Now()
|
||||
cdCopy.Status.TrackedConfigs = configs
|
||||
|
||||
cd, err = c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Scale sets the canary deployment replicas
|
||||
func (c *CanaryDeployer) Scale(cd *flaggerv1.Canary, replicas int32) error {
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
|
||||
}
|
||||
return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
depCopy := dep.DeepCopy()
|
||||
depCopy.Spec.Replicas = int32p(replicas)
|
||||
|
||||
_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconcile creates the primary deployment and hpa
|
||||
// and scales to zero the canary deployment
|
||||
func (c *CanaryDeployer) Sync(cd *flaggerv1.Canary) error {
|
||||
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
|
||||
if err := c.createPrimaryDeployment(cd); err != nil {
|
||||
return fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
|
||||
}
|
||||
|
||||
if cd.Status.Phase == "" {
|
||||
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
if err := c.Scale(cd, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if cd.Spec.AutoscalerRef != nil && cd.Spec.AutoscalerRef.Kind == "HorizontalPodAutoscaler" {
|
||||
if err := c.createPrimaryHpa(cd); err != nil {
|
||||
return fmt.Errorf("creating hpa %s.%s failed: %v", primaryName, cd.Namespace, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CanaryDeployer) createPrimaryDeployment(cd *flaggerv1.Canary) error {
|
||||
targetName := cd.Spec.TargetRef.Name
|
||||
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
|
||||
|
||||
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("deployment %s.%s not found, retrying", targetName, cd.Namespace)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if appSel, ok := canaryDep.Spec.Selector.MatchLabels["app"]; !ok || appSel != canaryDep.Name {
|
||||
return fmt.Errorf("invalid label selector! Deployment %s.%s spec.selector.matchLabels must contain selector 'app: %s'",
|
||||
targetName, cd.Namespace, targetName)
|
||||
}
|
||||
|
||||
primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
// create primary secrets and config maps
|
||||
configRefs, err := c.configTracker.GetTargetConfigs(cd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.configTracker.CreatePrimaryConfigs(cd, configRefs); err != nil {
|
||||
return err
|
||||
}
|
||||
annotations, err := c.makeAnnotations(canaryDep.Spec.Template.Annotations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
replicas := int32(1)
|
||||
if canaryDep.Spec.Replicas != nil && *canaryDep.Spec.Replicas > 0 {
|
||||
replicas = *canaryDep.Spec.Replicas
|
||||
}
|
||||
|
||||
// create primary deployment
|
||||
primaryDep = &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: primaryName,
|
||||
Labels: canaryDep.Labels,
|
||||
Namespace: cd.Namespace,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
|
||||
Group: flaggerv1.SchemeGroupVersion.Group,
|
||||
Version: flaggerv1.SchemeGroupVersion.Version,
|
||||
Kind: flaggerv1.CanaryKind,
|
||||
}),
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
ProgressDeadlineSeconds: canaryDep.Spec.ProgressDeadlineSeconds,
|
||||
MinReadySeconds: canaryDep.Spec.MinReadySeconds,
|
||||
RevisionHistoryLimit: canaryDep.Spec.RevisionHistoryLimit,
|
||||
Replicas: int32p(replicas),
|
||||
Strategy: canaryDep.Spec.Strategy,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": primaryName,
|
||||
},
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryName),
|
||||
Annotations: annotations,
|
||||
},
|
||||
// update spec with the primary secrets and config maps
|
||||
Spec: c.configTracker.ApplyPrimaryConfigs(canaryDep.Spec.Template.Spec, configRefs),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Create(primaryDep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("Deployment %s.%s created", primaryDep.GetName(), cd.Namespace)
	}

	return nil
}

func (c *CanaryDeployer) createPrimaryHpa(cd *flaggerv1.Canary) error {
	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
	hpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("HorizontalPodAutoscaler %s.%s not found, retrying",
				cd.Spec.AutoscalerRef.Name, cd.Namespace)
		}
		return err
	}
	primaryHpaName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
	primaryHpa, err := c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Get(primaryHpaName, metav1.GetOptions{})

	if errors.IsNotFound(err) {
		primaryHpa = &hpav1.HorizontalPodAutoscaler{
			ObjectMeta: metav1.ObjectMeta{
				Name:      primaryHpaName,
				Namespace: cd.Namespace,
				Labels:    hpa.Labels,
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(cd, schema.GroupVersionKind{
						Group:   flaggerv1.SchemeGroupVersion.Group,
						Version: flaggerv1.SchemeGroupVersion.Version,
						Kind:    flaggerv1.CanaryKind,
					}),
				},
			},
			Spec: hpav1.HorizontalPodAutoscalerSpec{
				ScaleTargetRef: hpav1.CrossVersionObjectReference{
					Name:       primaryName,
					Kind:       hpa.Spec.ScaleTargetRef.Kind,
					APIVersion: hpa.Spec.ScaleTargetRef.APIVersion,
				},
				MinReplicas: hpa.Spec.MinReplicas,
				MaxReplicas: hpa.Spec.MaxReplicas,
				Metrics:     hpa.Spec.Metrics,
			},
		}

		_, err = c.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(cd.Namespace).Create(primaryHpa)
		if err != nil {
			return err
		}
		c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof("HorizontalPodAutoscaler %s.%s created", primaryHpa.GetName(), cd.Namespace)
	}

	return nil
}

// isDeploymentReady determines if a deployment is ready by checking the status conditions;
// if a deployment has exceeded the progress deadline it returns a non-retriable error
func (c *CanaryDeployer) isDeploymentReady(deployment *appsv1.Deployment, deadline int) (bool, error) {
	retriable := true
	if deployment.Generation <= deployment.Status.ObservedGeneration {
		progress := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
		if progress != nil {
			// Determine if the deployment is stuck by checking if there is a minimum replicas unavailable condition
			// and if the last update time exceeds the deadline
			available := c.getDeploymentCondition(deployment.Status, appsv1.DeploymentAvailable)
			if available != nil && available.Status == "False" && available.Reason == "MinimumReplicasUnavailable" {
				from := available.LastUpdateTime
				delta := time.Duration(deadline) * time.Second
				retriable = !from.Add(delta).Before(time.Now())
			}
		}

		if progress != nil && progress.Reason == "ProgressDeadlineExceeded" {
			return false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.GetName())
		} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
			return retriable, fmt.Errorf("waiting for rollout to finish: %d out of %d new replicas have been updated",
				deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas)
		} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
			return retriable, fmt.Errorf("waiting for rollout to finish: %d old replicas are pending termination",
				deployment.Status.Replicas-deployment.Status.UpdatedReplicas)
		} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
			return retriable, fmt.Errorf("waiting for rollout to finish: %d of %d updated replicas are available",
				deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas)
		}

	} else {
		return true, fmt.Errorf("waiting for rollout to finish: observed deployment generation less than desired generation")
	}

	return true, nil
}
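
A note on usage (not part of this diff): the boolean returned alongside the error distinguishes retriable "still rolling out" states from a fatal progress-deadline violation, so a caller can poll until success or a hard failure. A minimal sketch of such a loop; the waitForRollout helper and the check signature are illustrative assumptions, not Flagger code:

package main

import (
	"fmt"
	"time"
)

// waitForRollout polls a readiness check until it succeeds, the check reports
// a non-retriable failure, or the overall timeout expires. The check mirrors
// isDeploymentReady above: nil error means ready, the bool means retriable.
func waitForRollout(check func() (bool, error), interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		retriable, err := check()
		if err == nil {
			return nil
		}
		if !retriable || time.Now().After(deadline) {
			return fmt.Errorf("rollout failed: %v", err)
		}
		time.Sleep(interval)
	}
}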

func (c *CanaryDeployer) getDeploymentCondition(
	status appsv1.DeploymentStatus,
	conditionType appsv1.DeploymentConditionType,
) *appsv1.DeploymentCondition {
	for i := range status.Conditions {
		c := status.Conditions[i]
		if c.Type == conditionType {
			return &c
		}
	}
	return nil
}

// makeAnnotations appends a unique ID to the annotations map
func (c *CanaryDeployer) makeAnnotations(annotations map[string]string) (map[string]string, error) {
	idKey := "flagger-id"
	res := make(map[string]string)
	uuid := make([]byte, 16)
	n, err := io.ReadFull(rand.Reader, uuid)
	if n != len(uuid) || err != nil {
		return res, err
	}
	uuid[8] = uuid[8]&^0xc0 | 0x80
	uuid[6] = uuid[6]&^0xf0 | 0x40
	id := fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])

	for k, v := range annotations {
		if k != idKey {
			res[k] = v
		}
	}
	res[idKey] = id

	return res, nil
}
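
The two masking lines above are what turn the random bytes into a valid RFC 4122 version-4 UUID: byte 6 carries the version number in its high nibble and byte 8 carries the variant in its two high bits. A self-contained sketch of the same technique (the newUUIDv4 name is ours, not Flagger's):

package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

// newUUIDv4 builds a version-4 UUID the same way makeAnnotations does:
// 16 random bytes with the version (0100) and variant (10) bits forced.
func newUUIDv4() (string, error) {
	b := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return "", err
	}
	b[6] = b[6]&^0xf0 | 0x40 // version 4 in the high nibble of byte 6
	b[8] = b[8]&^0xc0 | 0x80 // RFC 4122 variant in the two high bits of byte 8
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
}

func main() {
	id, _ := newUUIDv4()
	fmt.Println(id) // e.g. 1b4e28ba-2fa1-4d3b-a446-87c0d34b0e5f
}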

func makePrimaryLabels(labels map[string]string, primaryName string) map[string]string {
	idKey := "app"
	res := make(map[string]string)
	for k, v := range labels {
		if k != idKey {
			res[k] = v
		}
	}
	res[idKey] = primaryName

	return res
}
@@ -1,257 +0,0 @@
package controller

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// CanaryObserver is used to query the Istio Prometheus db
type CanaryObserver struct {
	metricsServer string
}

type vectorQueryResponse struct {
	Data struct {
		Result []struct {
			Metric struct {
				Code string `json:"response_code"`
				Name string `json:"destination_workload"`
			}
			Value []interface{} `json:"value"`
		}
	}
}

func (c *CanaryObserver) queryMetric(query string) (*vectorQueryResponse, error) {
	promURL, err := url.Parse(c.metricsServer)
	if err != nil {
		return nil, err
	}

	u, err := url.Parse(fmt.Sprintf("./api/v1/query?query=%s", query))
	if err != nil {
		return nil, err
	}

	u = promURL.ResolveReference(u)

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()

	r, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer r.Body.Close()

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading body: %s", err.Error())
	}

	if 400 <= r.StatusCode {
		return nil, fmt.Errorf("error response: %s", string(b))
	}

	var values vectorQueryResponse
	err = json.Unmarshal(b, &values)
	if err != nil {
		return nil, fmt.Errorf("error unmarshaling result: %s, '%s'", err.Error(), string(b))
	}

	return &values, nil
}

// GetScalar runs the promql query and returns the first value found
func (c *CanaryObserver) GetScalar(query string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	query = strings.Replace(query, "\n", "", -1)
	query = strings.Replace(query, " ", "", -1)

	var value *float64
	result, err := c.queryMetric(query)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			value = &f
		}
	}
	if value == nil {
		return 0, fmt.Errorf("no values found for query %s", query)
	}
	return *value, nil
}

func (c *CanaryObserver) GetEnvoySuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	var rate *float64
	querySt := url.QueryEscape(`sum(rate(` +
		metric + `{kubernetes_namespace="` +
		namespace + `",kubernetes_pod_name=~"` +
		name + `-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",envoy_response_code!~"5.*"}[` +
		interval + `])) / sum(rate(` +
		metric + `{kubernetes_namespace="` +
		namespace + `",kubernetes_pod_name=~"` +
		name + `-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[` +
		interval + `])) * 100 `)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	return *rate, nil
}

// GetDeploymentCounter returns the requests success rate using istio_requests_total metric
func (c *CanaryObserver) GetDeploymentCounter(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	var rate *float64
	querySt := url.QueryEscape(`sum(rate(` +
		metric + `{reporter="destination",destination_workload_namespace=~"` +
		namespace + `",destination_workload=~"` +
		name + `",response_code!~"5.*"}[` +
		interval + `])) / sum(rate(` +
		metric + `{reporter="destination",destination_workload_namespace=~"` +
		namespace + `",destination_workload=~"` +
		name + `"}[` +
		interval + `])) * 100 `)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	return *rate, nil
}

// GetDeploymentHistogram returns the 99P requests delay using istio_request_duration_seconds_bucket metrics
func (c *CanaryObserver) GetDeploymentHistogram(name string, namespace string, metric string, interval string) (time.Duration, error) {
	if c.metricsServer == "fake" {
		return 1, nil
	}
	var rate *float64
	querySt := url.QueryEscape(`histogram_quantile(0.99, sum(rate(` +
		metric + `{reporter="destination",destination_workload=~"` +
		name + `", destination_workload_namespace=~"` +
		namespace + `"}[` +
		interval + `])) by (le))`)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	ms := time.Duration(int64(*rate*1000)) * time.Millisecond
	return ms, nil
}

// CheckMetricsServer calls the Prometheus status endpoint and returns an error if
// the API is unreachable
func CheckMetricsServer(address string) (bool, error) {
	promURL, err := url.Parse(address)
	if err != nil {
		return false, err
	}

	u, err := url.Parse("./api/v1/status/flags")
	if err != nil {
		return false, err
	}

	u = promURL.ResolveReference(u)

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return false, err
	}

	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()

	r, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return false, err
	}
	defer r.Body.Close()

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return false, fmt.Errorf("error reading body: %s", err.Error())
	}

	if 400 <= r.StatusCode {
		return false, fmt.Errorf("error response: %s", string(b))
	}

	return true, nil
}
@@ -2,12 +2,13 @@ package controller

import (
	"fmt"
-	"github.com/weaveworks/flagger/pkg/router"
	"strings"
	"time"

+	"github.com/weaveworks/flagger/pkg/router"
+
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scheduleCanaries synchronises the canary map with the jobs map,

@@ -90,17 +91,17 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)

	// create primary deployment and hpa if needed
-	if err := c.deployer.Sync(cd); err != nil {
+	label, err := c.deployer.Initialize(cd)
+	if err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return
	}

	// init routers
-	routerFactory := router.NewFactory(c.kubeClient, c.flaggerClient, c.logger, c.istioClient)
-	meshRouter := routerFactory.MeshRouter(c.meshProvider)
+	meshRouter := c.routerFactory.MeshRouter(c.meshProvider)

	// create or update ClusterIP services
-	if err := routerFactory.KubernetesRouter().Reconcile(cd); err != nil {
+	if err := c.routerFactory.KubernetesRouter(label).Reconcile(cd); err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return
	}

@@ -111,13 +112,14 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
		return
	}

-	shouldAdvance, err := c.deployer.ShouldAdvance(cd)
+	shouldAdvance, err := c.shouldAdvance(cd)
	if err != nil {
		c.recordEventWarningf(cd, "%v", err)
		return
	}

	if !shouldAdvance {
+		c.recorder.SetStatus(cd, cd.Status.Phase)
		return
	}

@@ -239,6 +241,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
		}

		c.recorder.SetStatus(cd, flaggerv1.CanaryFailed)
+		c.runPostRolloutHooks(cd, flaggerv1.CanaryFailed)
		return
	}

@@ -246,6 +249,15 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
	// skip check if no traffic is routed to canary
	if canaryWeight == 0 {
		c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
+
+		// run pre-rollout web hooks
+		if ok := c.runPreRolloutHooks(cd); !ok {
+			if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {
+				c.recordEventWarningf(cd, "%v", err)
+				return
+			}
+			return
+		}
	} else {
		if ok := c.analyseCanary(cd); !ok {
			if err := c.deployer.SetStatusFailedChecks(cd, cd.Status.FailedChecks+1); err != nil {

@@ -313,6 +325,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
			return
		}
		c.recorder.SetStatus(cd, flaggerv1.CanarySucceeded)
+		c.runPostRolloutHooks(cd, flaggerv1.CanarySucceeded)
		c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
			false, false)
		return

@@ -379,6 +392,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
		return
	}
	c.recorder.SetStatus(cd, flaggerv1.CanarySucceeded)
+	c.runPostRolloutHooks(cd, flaggerv1.CanarySucceeded)
	c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
		false, false)
}

@@ -428,6 +442,28 @@ func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.
	return true
}

+func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
+	if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
+		return true, nil
+	}
+
+	newDep, err := c.deployer.HasDeploymentChanged(cd)
+	if err != nil {
+		return false, err
+	}
+	if newDep {
+		return newDep, nil
+	}
+
+	newCfg, err := c.deployer.ConfigTracker.HasConfigChanged(cd)
+	if err != nil {
+		return false, err
+	}
+
+	return newCfg, nil
+}
+
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
	c.recorder.SetStatus(cd, cd.Status.Phase)
	if cd.Status.Phase == flaggerv1.CanaryProgressing {

@@ -466,24 +502,57 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool)

func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
	if cd.Status.Phase == flaggerv1.CanaryProgressing {
-		if diff, _ := c.deployer.IsNewSpec(cd); diff {
+		if diff, _ := c.deployer.HasDeploymentChanged(cd); diff {
			return true
		}
-		if diff, _ := c.deployer.configTracker.HasConfigChanged(cd); diff {
+		if diff, _ := c.deployer.ConfigTracker.HasConfigChanged(cd); diff {
			return true
		}
	}
	return false
}

+func (c *Controller) runPreRolloutHooks(canary *flaggerv1.Canary) bool {
+	for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
+		if webhook.Type == flaggerv1.PreRolloutHook {
+			err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryProgressing, webhook)
+			if err != nil {
+				c.recordEventWarningf(canary, "Halt %s.%s advancement pre-rollout check %s failed %v",
+					canary.Name, canary.Namespace, webhook.Name, err)
+				return false
+			} else {
+				c.recordEventInfof(canary, "Pre-rollout check %s passed", webhook.Name)
+			}
+		}
+	}
+	return true
+}
+
+func (c *Controller) runPostRolloutHooks(canary *flaggerv1.Canary, phase flaggerv1.CanaryPhase) bool {
+	for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
+		if webhook.Type == flaggerv1.PostRolloutHook {
+			err := CallWebhook(canary.Name, canary.Namespace, phase, webhook)
+			if err != nil {
+				c.recordEventWarningf(canary, "Post-rollout hook %s failed %v", webhook.Name, err)
+				return false
+			} else {
+				c.recordEventInfof(canary, "Post-rollout check %s passed", webhook.Name)
+			}
+		}
+	}
+	return true
+}
+
func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
	// run external checks
	for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
-		err := CallWebhook(r.Name, r.Namespace, webhook)
-		if err != nil {
-			c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
-				r.Name, r.Namespace, webhook.Name, err)
-			return false
+		if webhook.Type == "" || webhook.Type == flaggerv1.RolloutHook {
+			err := CallWebhook(r.Name, r.Namespace, flaggerv1.CanaryProgressing, webhook)
+			if err != nil {
+				c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
+					r.Name, r.Namespace, webhook.Name, err)
+				return false
+			}
		}
	}

@@ -493,56 +562,112 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
		metric.Interval = r.GetMetricInterval()
	}

-	if metric.Name == "envoy_cluster_upstream_rq" {
-		val, err := c.observer.GetEnvoySuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
-		if err != nil {
-			if strings.Contains(err.Error(), "no values found") {
-				c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
-					metric.Name, r.Spec.TargetRef.Name, r.Namespace)
-			} else {
-				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
-			}
-			return false
-		}
-		if float64(metric.Threshold) > val {
-			c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
-				r.Name, r.Namespace, val, metric.Threshold)
-			return false
-		}
-	}
-
-	if metric.Name == "istio_requests_total" {
-		val, err := c.observer.GetDeploymentCounter(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
-		if err != nil {
-			if strings.Contains(err.Error(), "no values found") {
-				c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
-					metric.Name, r.Spec.TargetRef.Name, r.Namespace)
-			} else {
-				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
-			}
-			return false
-		}
-		if float64(metric.Threshold) > val {
-			c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
-				r.Name, r.Namespace, val, metric.Threshold)
-			return false
-		}
-	}
-
-	if metric.Name == "istio_request_duration_seconds_bucket" {
-		val, err := c.observer.GetDeploymentHistogram(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
-		if err != nil {
-			c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
-			return false
-		}
-		t := time.Duration(metric.Threshold) * time.Millisecond
-		if val > t {
-			c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
-				r.Name, r.Namespace, val, t)
-			return false
-		}
-	}
+	// App Mesh checks
+	if c.meshProvider == "appmesh" {
+		if metric.Name == "request-success-rate" || metric.Name == "envoy_cluster_upstream_rq" {
+			val, err := c.observer.GetEnvoySuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				if strings.Contains(err.Error(), "no values found") {
+					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
+						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
+				} else {
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				}
+				return false
+			}
+			if float64(metric.Threshold) > val {
+				c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
+					r.Name, r.Namespace, val, metric.Threshold)
+				return false
+			}
+		}
+
+		if metric.Name == "request-duration" || metric.Name == "envoy_cluster_upstream_rq_time_bucket" {
+			val, err := c.observer.GetEnvoyRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				return false
+			}
+			t := time.Duration(metric.Threshold) * time.Millisecond
+			if val > t {
+				c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
+					r.Name, r.Namespace, val, t)
+				return false
+			}
+		}
+	}
+
+	// Istio checks
+	if c.meshProvider == "istio" {
+		if metric.Name == "request-success-rate" || metric.Name == "istio_requests_total" {
+			val, err := c.observer.GetIstioSuccessRate(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				if strings.Contains(err.Error(), "no values found") {
+					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
+						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
+				} else {
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				}
+				return false
+			}
+			if float64(metric.Threshold) > val {
+				c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
+					r.Name, r.Namespace, val, metric.Threshold)
+				return false
+			}
+		}
+
+		if metric.Name == "request-duration" || metric.Name == "istio_request_duration_seconds_bucket" {
+			val, err := c.observer.GetIstioRequestDuration(r.Spec.TargetRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				return false
+			}
+			t := time.Duration(metric.Threshold) * time.Millisecond
+			if val > t {
+				c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
+					r.Name, r.Namespace, val, t)
+				return false
+			}
+		}
+	}
+
+	// NGINX checks
+	if c.meshProvider == "nginx" {
+		if metric.Name == "request-success-rate" {
+			val, err := c.observer.GetNginxSuccessRate(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				if strings.Contains(err.Error(), "no values found") {
+					c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
+						metric.Name, r.Spec.TargetRef.Name, r.Namespace)
+				} else {
+					c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				}
+				return false
+			}
+			if float64(metric.Threshold) > val {
+				c.recordEventWarningf(r, "Halt %s.%s advancement success rate %.2f%% < %v%%",
+					r.Name, r.Namespace, val, metric.Threshold)
+				return false
+			}
+		}
+
+		if metric.Name == "request-duration" {
+			val, err := c.observer.GetNginxRequestDuration(r.Spec.IngressRef.Name, r.Namespace, metric.Name, metric.Interval)
+			if err != nil {
+				c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
+				return false
+			}
+			t := time.Duration(metric.Threshold) * time.Millisecond
+			if val > t {
+				c.recordEventWarningf(r, "Halt %s.%s advancement request duration %v > %v",
+					r.Name, r.Namespace, val, t)
+				return false
+			}
+		}
+	}

	// custom checks
	if metric.Query != "" {
		val, err := c.observer.GetScalar(metric.Query)
		if err != nil {

@@ -550,7 +675,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
			c.recordEventWarningf(r, "Halt advancement no values found for metric %s probably %s.%s is not receiving traffic",
				metric.Name, r.Spec.TargetRef.Name, r.Namespace)
		} else {
-			c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.metricsServer, err)
+			c.recordEventErrorf(r, "Metrics server %s query failed: %v", c.observer.GetMetricsServer(), err)
		}
		return false
	}

@@ -15,10 +15,11 @@ import (

// CallWebhook does an HTTP POST to an external service and
// returns an error if the response status code is non-2xx
-func CallWebhook(name string, namespace string, w flaggerv1.CanaryWebhook) error {
+func CallWebhook(name string, namespace string, phase flaggerv1.CanaryPhase, w flaggerv1.CanaryWebhook) error {
	payload := flaggerv1.CanaryWebhookPayload{
		Name:      name,
		Namespace: namespace,
+		Phase:     phase,
	}

	if w.Metadata != nil {

@@ -19,7 +19,7 @@ func TestCallWebhook(t *testing.T) {
		Metadata: &map[string]string{"key1": "val1"},
	}

-	err := CallWebhook("podinfo", "default", hook)
+	err := CallWebhook("podinfo", "default", flaggerv1.CanaryProgressing, hook)
	if err != nil {
		t.Fatal(err.Error())
	}

@@ -35,7 +35,7 @@ func TestCallWebhook_StatusCode(t *testing.T) {
		URL: ts.URL,
	}

-	err := CallWebhook("podinfo", "default", hook)
+	err := CallWebhook("podinfo", "default", flaggerv1.CanaryProgressing, hook)
	if err == nil {
		t.Errorf("Got no error wanted %v", http.StatusInternalServerError)
	}
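
With the phase added, a receiving service can tell pre-rollout, rollout and post-rollout calls apart from the body alone. A rough sketch of the JSON a webhook receiver gets; the field names and tags here are illustrative, not copied from the Flagger API types:

package main

import (
	"encoding/json"
	"fmt"
)

// payload mirrors the three fields CallWebhook now sends.
type payload struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
	Phase     string `json:"phase"`
}

func main() {
	b, _ := json.Marshal(payload{Name: "podinfo", Namespace: "default", Phase: "Progressing"})
	fmt.Println(string(b)) // {"name":"podinfo","namespace":"default","phase":"Progressing"}
}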

pkg/loadtester/bats.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package loadtester

import (
	"context"
	"fmt"
	"os/exec"
)

const TaskTypeBats = "bats"

type BatsTask struct {
	TaskBase
	command      string
	logCmdOutput bool
}

func (task *BatsTask) Hash() string {
	return hash(task.canary + task.command)
}

func (task *BatsTask) Run(ctx context.Context) (bool, error) {
	cmd := exec.CommandContext(ctx, "bash", "-c", task.command)
	out, err := cmd.CombinedOutput()

	if err != nil {
		task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
		return false, fmt.Errorf(" %v %v", err, out)
	} else {
		if task.logCmdOutput {
			fmt.Printf("%s\n", out)
		}
		task.logger.With("canary", task.canary).Infof("command finished %s", task.command)
	}
	return true, nil
}

func (task *BatsTask) String() string {
	return task.command
}
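
For context, the load tester resolves the task type from the webhook metadata, so triggering a bats run amounts to a POST like the sketch below. The URL and the bats command are placeholders; only the "type" and "cmd" metadata keys come from the server handler in this change set:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The handler reads metadata["cmd"] and runs it as a blocking bats task.
	body, _ := json.Marshal(map[string]interface{}{
		"name":      "podinfo",
		"namespace": "default",
		"metadata":  map[string]string{"type": "bats", "cmd": "bats /tests/acceptance.bats"},
	})
	resp, err := http.Post("http://flagger-loadtester/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK when the suite passes, 500 otherwise
}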

@@ -1,14 +1,14 @@
package loadtester

import (
-	"github.com/weaveworks/flagger/pkg/logging"
+	"github.com/weaveworks/flagger/pkg/logger"
	"testing"
	"time"
)

func TestTaskRunner_Start(t *testing.T) {
	stop := make(chan struct{})
-	logger, _ := logging.NewLogger("debug")
+	logger, _ := logger.NewLogger("debug")
	tr := NewTaskRunner(logger, time.Hour)

	go tr.Start(10*time.Millisecond, stop)

@@ -44,6 +44,27 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogge
		if !ok {
			typ = TaskTypeShell
		}

+		// run bats command (blocking task)
+		if typ == TaskTypeBats {
+			bats := BatsTask{
+				command:      payload.Metadata["cmd"],
+				logCmdOutput: taskRunner.logCmdOutput,
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
+			defer cancel()
+
+			ok, err := bats.Run(ctx)
+			if !ok {
+				w.WriteHeader(http.StatusInternalServerError)
+				w.Write([]byte(err.Error()))
+			}
+
+			w.WriteHeader(http.StatusOK)
+			return
+		}
+
		taskFactory, ok := GetTaskFactory(typ)
		if !ok {
			w.WriteHeader(http.StatusBadRequest)

@@ -24,6 +24,7 @@ type TaskBase struct {
func (task *TaskBase) Canary() string {
	return task.canary
}

func hash(str string) string {
	fnvHash := fnv.New32()
	fnvBytes := fnvHash.Sum([]byte(str))

@@ -3,7 +3,7 @@ package loadtester
import (
	"context"
	"fmt"
-	"github.com/weaveworks/flagger/pkg/logging"
+	"github.com/weaveworks/flagger/pkg/logger"
	"gopkg.in/h2non/gock.v1"
	"testing"
	"time"

@@ -12,7 +12,7 @@ import (
func TestTaskNGrinder(t *testing.T) {
	server := "http://ngrinder:8080"
	cloneId := "960"
-	logger, _ := logging.NewLoggerWithEncoding("debug", "console")
+	logger, _ := logger.NewLoggerWithEncoding("debug", "console")
	canary := "podinfo.default"
	taskFactory, ok := GetTaskFactory(TaskTypeNGrinder)
	if !ok {

@@ -1,9 +1,6 @@
-package logging
+package logger

import (
-	"fmt"
-	"os"
-
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

@@ -64,12 +61,3 @@ func NewLoggerWithEncoding(logLevel, zapEncoding string) (*zap.SugaredLogger, er
	}
	return logger.Sugar(), nil
}
-
-// Console writes to stdout if the console env var exists
-func Console(a ...interface{}) (n int, err error) {
-	if os.Getenv("console") != "" {
-		return fmt.Fprintln(os.Stdout, a...)
-	}
-
-	return 0, nil
-}

pkg/metrics/envoy.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package metrics

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

const envoySuccessRateQuery = `
sum(rate(
envoy_cluster_upstream_rq{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
envoy_response_code!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
envoy_cluster_upstream_rq{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}
[{{ .Interval }}]))
* 100
`

func (c *Observer) GetEnvoySuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, envoySuccessRateQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	return *rate, nil
}

const envoyRequestDurationQuery = `
histogram_quantile(0.99, sum(rate(
envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="{{ .Namespace }}",
kubernetes_pod_name=~"{{ .Name }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}
[{{ .Interval }}])) by (le))
`

// GetEnvoyRequestDuration returns the 99P requests delay using envoy_cluster_upstream_rq_time_bucket metrics
func (c *Observer) GetEnvoyRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
	if c.metricsServer == "fake" {
		return 1, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, envoyRequestDurationQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	ms := time.Duration(int64(*rate)) * time.Millisecond
	return ms, nil
}

pkg/metrics/envoy_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package metrics

import (
	"testing"
)

func Test_EnvoySuccessRateQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"default",
		"1m",
	}

	query, err := render(meta, envoySuccessRateQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",envoy_response_code!~"5.*"}[1m])) / sum(rate(envoy_cluster_upstream_rq{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m])) * 100`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

func Test_EnvoyRequestDurationQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"default",
		"1m",
	}

	query, err := render(meta, envoyRequestDurationQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `histogram_quantile(0.99, sum(rate(envoy_cluster_upstream_rq_time_bucket{kubernetes_namespace="default",kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[1m])) by (le))`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

pkg/metrics/istio.go (new file, 123 lines)
@@ -0,0 +1,123 @@
package metrics

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

const istioSuccessRateQuery = `
sum(rate(
istio_requests_total{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}",
response_code!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
istio_requests_total{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}"}
[{{ .Interval }}]))
* 100
`

// GetIstioSuccessRate returns the requests success rate (non 5xx) using istio_requests_total metric
func (c *Observer) GetIstioSuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, istioSuccessRateQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	return *rate, nil
}

const istioRequestDurationQuery = `
histogram_quantile(0.99, sum(rate(
istio_request_duration_seconds_bucket{reporter="destination",
destination_workload_namespace="{{ .Namespace }}",
destination_workload=~"{{ .Name }}"}
[{{ .Interval }}])) by (le))
`

// GetIstioRequestDuration returns the 99P requests delay using istio_request_duration_seconds_bucket metrics
func (c *Observer) GetIstioRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
	if c.metricsServer == "fake" {
		return 1, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, istioRequestDurationQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	ms := time.Duration(int64(*rate*1000)) * time.Millisecond
	return ms, nil
}
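
Note the unit handling between the two duration helpers: istio_request_duration_seconds_bucket is recorded in seconds, so the 99th percentile is multiplied by 1000 before the cast, while envoy_cluster_upstream_rq_time_bucket already reports milliseconds and is cast directly. A quick sketch using the same values as the observer tests later in this change set:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Istio: histogram_quantile returns seconds, e.g. 0.2
	p99Seconds := 0.2
	fmt.Println(time.Duration(int64(p99Seconds*1000)) * time.Millisecond) // 200ms

	// Envoy: histogram_quantile returns milliseconds, e.g. 200
	p99Millis := 200.0
	fmt.Println(time.Duration(int64(p99Millis)) * time.Millisecond) // 200ms
}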

pkg/metrics/istio_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package metrics

import (
	"testing"
)

func Test_IstioSuccessRateQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"default",
		"1m",
	}

	query, err := render(meta, istioSuccessRateQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo",response_code!~"5.*"}[1m])) / sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m])) * 100`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

func Test_IstioRequestDurationQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"default",
		"1m",
	}

	query, err := render(meta, istioRequestDurationQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `histogram_quantile(0.99, sum(rate(istio_request_duration_seconds_bucket{reporter="destination",destination_workload_namespace="default",destination_workload=~"podinfo"}[1m])) by (le))`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

pkg/metrics/nginx.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package metrics

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

const nginxSuccessRateQuery = `
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}",
status!~"5.*"}
[{{ .Interval }}]))
/
sum(rate(
nginx_ingress_controller_requests{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}
[{{ .Interval }}]))
* 100
`

// GetNginxSuccessRate returns the requests success rate (non 5xx) using nginx_ingress_controller_requests metric
func (c *Observer) GetNginxSuccessRate(name string, namespace string, metric string, interval string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, nginxSuccessRateQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	return *rate, nil
}

const nginxRequestDurationQuery = `
sum(rate(
nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}]))
/
sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{ .Namespace }}",
ingress="{{ .Name }}"}[{{ .Interval }}])) * 1000
`

// GetNginxRequestDuration returns the avg requests latency using nginx_ingress_controller_ingress_upstream_latency_seconds_sum metric
func (c *Observer) GetNginxRequestDuration(name string, namespace string, metric string, interval string) (time.Duration, error) {
	if c.metricsServer == "fake" {
		return 1, nil
	}

	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		name,
		namespace,
		interval,
	}

	query, err := render(meta, nginxRequestDurationQuery)
	if err != nil {
		return 0, err
	}

	var rate *float64
	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			rate = &f
		}
	}
	if rate == nil {
		return 0, fmt.Errorf("no values found for metric %s", metric)
	}
	ms := time.Duration(int64(*rate)) * time.Millisecond
	return ms, nil
}

pkg/metrics/nginx_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package metrics

import (
	"testing"
)

func Test_NginxSuccessRateQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"nginx",
		"1m",
	}

	query, err := render(meta, nginxSuccessRateQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo",status!~"5.*"}[1m])) / sum(rate(nginx_ingress_controller_requests{namespace="nginx",ingress="podinfo"}[1m])) * 100`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

func Test_NginxRequestDurationQueryRender(t *testing.T) {
	meta := struct {
		Name      string
		Namespace string
		Interval  string
	}{
		"podinfo",
		"nginx",
		"1m",
	}

	query, err := render(meta, nginxRequestDurationQuery)
	if err != nil {
		t.Fatal(err)
	}

	expected := `sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="nginx",ingress="podinfo"}[1m])) /sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="nginx",ingress="podinfo"}[1m])) * 1000`

	if query != expected {
		t.Errorf("\nGot %s \nWanted %s", query, expected)
	}
}

pkg/metrics/observer.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package metrics

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"text/template"
	"time"
)

// Observer is used to query Prometheus
type Observer struct {
	metricsServer string
}

type vectorQueryResponse struct {
	Data struct {
		Result []struct {
			Metric struct {
				Code string `json:"response_code"`
				Name string `json:"destination_workload"`
			}
			Value []interface{} `json:"value"`
		}
	}
}

// NewObserver creates a new observer
func NewObserver(metricsServer string) Observer {
	return Observer{
		metricsServer: metricsServer,
	}
}

// GetMetricsServer returns the Prometheus URL
func (c *Observer) GetMetricsServer() string {
	return c.metricsServer
}

func (c *Observer) queryMetric(query string) (*vectorQueryResponse, error) {
	promURL, err := url.Parse(c.metricsServer)
	if err != nil {
		return nil, err
	}

	u, err := url.Parse(fmt.Sprintf("./api/v1/query?query=%s", query))
	if err != nil {
		return nil, err
	}

	u = promURL.ResolveReference(u)

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()

	r, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer r.Body.Close()

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading body: %s", err.Error())
	}

	if 400 <= r.StatusCode {
		return nil, fmt.Errorf("error response: %s", string(b))
	}

	var values vectorQueryResponse
	err = json.Unmarshal(b, &values)
	if err != nil {
		return nil, fmt.Errorf("error unmarshaling result: %s, '%s'", err.Error(), string(b))
	}

	return &values, nil
}
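
A side note on the URL handling above (unchanged from the old observer): the relative "./api/v1/..." reference combined with ResolveReference preserves any path prefix on the metrics server address, e.g. a Prometheus served behind a reverse-proxy mount point. A small illustration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The trailing slash matters: without it the last path segment is dropped.
	base, _ := url.Parse("http://prometheus:9090/some/prefix/")
	ref, _ := url.Parse("./api/v1/query?query=up")
	fmt.Println(base.ResolveReference(ref))
	// http://prometheus:9090/some/prefix/api/v1/query?query=up
}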

// GetScalar runs the promql query and returns the first value found
func (c *Observer) GetScalar(query string) (float64, error) {
	if c.metricsServer == "fake" {
		return 100, nil
	}

	query = strings.Replace(query, "\n", "", -1)
	query = strings.Replace(query, " ", "", -1)

	var value *float64

	querySt := url.QueryEscape(query)
	result, err := c.queryMetric(querySt)
	if err != nil {
		return 0, err
	}

	for _, v := range result.Data.Result {
		metricValue := v.Value[1]
		switch metricValue.(type) {
		case string:
			f, err := strconv.ParseFloat(metricValue.(string), 64)
			if err != nil {
				return 0, err
			}
			value = &f
		}
	}
	if value == nil {
		return 0, fmt.Errorf("no values found for query %s", query)
	}
	return *value, nil
}

// CheckMetricsServer calls the Prometheus status endpoint and returns an error if
// the API is unreachable
func CheckMetricsServer(address string) (bool, error) {
	promURL, err := url.Parse(address)
	if err != nil {
		return false, err
	}

	u, err := url.Parse("./api/v1/status/flags")
	if err != nil {
		return false, err
	}

	u = promURL.ResolveReference(u)

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return false, err
	}

	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
	defer cancel()

	r, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return false, err
	}
	defer r.Body.Close()

	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return false, fmt.Errorf("error reading body: %s", err.Error())
	}

	if 400 <= r.StatusCode {
		return false, fmt.Errorf("error response: %s", string(b))
	}

	return true, nil
}

func render(meta interface{}, tmpl string) (string, error) {
	t, err := template.New("tmpl").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var data bytes.Buffer
	b := bufio.NewWriter(&data)

	if err := t.Execute(b, meta); err != nil {
		return "", err
	}
	err = b.Flush()
	if err != nil {
		return "", err
	}

	res := strings.ReplaceAll(data.String(), "\n", "")

	return res, nil
}
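
The render helper executes the query template and then strips every newline, which is why the multi-line PromQL constants above carry no indentation: whatever whitespace survives the flattening ends up in the query string the tests compare against. A standalone run of the same idea (renderQuery is our name for a trimmed-down render):

package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

// renderQuery executes the template, then flattens newlines so the
// query can be URL-escaped as a single line.
func renderQuery(meta interface{}, tmpl string) (string, error) {
	t, err := template.New("tmpl").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var data bytes.Buffer
	if err := t.Execute(&data, meta); err != nil {
		return "", err
	}
	return strings.Replace(data.String(), "\n", "", -1), nil
}

func main() {
	meta := struct{ Name, Interval string }{"podinfo", "1m"}
	q, _ := renderQuery(meta, "sum(rate(\nistio_requests_total{destination_workload=~\"{{ .Name }}\"}\n[{{ .Interval }}]))")
	fmt.Println(q) // sum(rate(istio_requests_total{destination_workload=~"podinfo"}[1m]))
}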

@@ -1,4 +1,4 @@
-package controller
+package metrics

import (
	"net/http"

@@ -7,18 +7,16 @@
	"time"
)

-func TestCanaryObserver_GetDeploymentCounter(t *testing.T) {
+func TestCanaryObserver_GetEnvoySuccessRate(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()

-	observer := CanaryObserver{
-		metricsServer: ts.URL,
-	}
+	observer := NewObserver(ts.URL)

-	val, err := observer.GetDeploymentCounter("podinfo", "default", "istio_requests_total", "1m")
+	val, err := observer.GetEnvoySuccessRate("podinfo", "default", "envoy_cluster_upstream_rq", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}

@@ -29,18 +27,55 @@ func TestCanaryObserver_GetDeploymentCounter(t *testing.T) {

}

-func TestCanaryObserver_GetDeploymentHistogram(t *testing.T) {
+func TestCanaryObserver_GetEnvoyRequestDuration(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.596,"200"]}]}}`
+		w.Write([]byte(json))
+	}))
+	defer ts.Close()
+
+	observer := NewObserver(ts.URL)
+
+	val, err := observer.GetEnvoyRequestDuration("podinfo", "default", "envoy_cluster_upstream_rq_time_bucket", "1m")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if val != 200*time.Millisecond {
+		t.Errorf("Got %v wanted %v", val, 200*time.Millisecond)
+	}
+}
+
+func TestCanaryObserver_GetIstioSuccessRate(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.458,"100"]}]}}`
+		w.Write([]byte(json))
+	}))
+	defer ts.Close()
+
+	observer := NewObserver(ts.URL)
+
+	val, err := observer.GetIstioSuccessRate("podinfo", "default", "istio_requests_total", "1m")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	if val != 100 {
+		t.Errorf("Got %v wanted %v", val, 100)
+	}
+
+}
+
+func TestCanaryObserver_GetIstioRequestDuration(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{},"value":[1545905245.596,"0.2"]}]}}`
		w.Write([]byte(json))
	}))
	defer ts.Close()

-	observer := CanaryObserver{
-		metricsServer: ts.URL,
-	}
+	observer := NewObserver(ts.URL)

-	val, err := observer.GetDeploymentHistogram("podinfo", "default", "istio_request_duration_seconds_bucket", "1m")
+	val, err := observer.GetIstioRequestDuration("podinfo", "default", "istio_request_duration_seconds_bucket", "1m")
	if err != nil {
		t.Fatal(err.Error())
	}

@@ -1,4 +1,4 @@
-package controller
+package metrics

import (
	"fmt"

@@ -8,50 +8,59 @@
	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
)

-// CanaryRecorder records the canary analysis as Prometheus metrics
-type CanaryRecorder struct {
+// Recorder records the canary analysis as Prometheus metrics
+type Recorder struct {
+	info     *prometheus.GaugeVec
	duration *prometheus.HistogramVec
	total    *prometheus.GaugeVec
	status   *prometheus.GaugeVec
	weight   *prometheus.GaugeVec
}

-// NewCanaryRecorder creates a new recorder and registers the Prometheus metrics
-func NewCanaryRecorder(register bool) CanaryRecorder {
+// NewRecorder creates a new recorder and registers the Prometheus metrics
+func NewRecorder(controller string, register bool) Recorder {
+	info := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Subsystem: controller,
+		Name:      "info",
+		Help:      "Flagger version and mesh provider information",
+	}, []string{"version", "mesh_provider"})
+
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
-		Subsystem: controllerAgentName,
+		Subsystem: controller,
		Name:      "canary_duration_seconds",
		Help:      "Seconds spent performing canary analysis.",
		Buckets:   prometheus.DefBuckets,
	}, []string{"name", "namespace"})

	total := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Subsystem: controllerAgentName,
+		Subsystem: controller,
		Name:      "canary_total",
		Help:      "Total number of canary object",
	}, []string{"namespace"})

	// 0 - running, 1 - successful, 2 - failed
	status := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Subsystem: controllerAgentName,
+		Subsystem: controller,
		Name:      "canary_status",
		Help:      "Last canary analysis result",
	}, []string{"name", "namespace"})

	weight := prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Subsystem: controllerAgentName,
+		Subsystem: controller,
		Name:      "canary_weight",
		Help:      "The virtual service destination weight current value",
	}, []string{"workload", "namespace"})

	if register {
+		prometheus.MustRegister(info)
		prometheus.MustRegister(duration)
		prometheus.MustRegister(total)
		prometheus.MustRegister(status)
		prometheus.MustRegister(weight)
	}

-	return CanaryRecorder{
+	return Recorder{
+		info:     info,
		duration: duration,
		total:    total,
		status:   status,

@@ -59,18 +68,23 @@ func NewCanaryRecorder(register bool) CanaryRecorder {
	}
}

+// SetInfo sets the version and mesh provider labels
+func (cr *Recorder) SetInfo(version string, meshProvider string) {
+	cr.info.WithLabelValues(version, meshProvider).Set(1)
+}
+
// SetDuration sets the time spent in seconds performing canary analysis
-func (cr *CanaryRecorder) SetDuration(cd *flaggerv1.Canary, duration time.Duration) {
+func (cr *Recorder) SetDuration(cd *flaggerv1.Canary, duration time.Duration) {
	cr.duration.WithLabelValues(cd.Spec.TargetRef.Name, cd.Namespace).Observe(duration.Seconds())
}

// SetTotal sets the total number of canaries per namespace
-func (cr *CanaryRecorder) SetTotal(namespace string, total int) {
+func (cr *Recorder) SetTotal(namespace string, total int) {
	cr.total.WithLabelValues(namespace).Set(float64(total))
}

// SetStatus sets the last known canary analysis status
-func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) {
+func (cr *Recorder) SetStatus(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) {
	status := 1
	switch phase {
	case flaggerv1.CanaryProgressing:

@@ -84,7 +98,7 @@ func (cr *CanaryRecorder) SetStatus(cd *flaggerv1.Canary, phase flaggerv1.Canary
}

// SetWeight sets the weight values for primary and canary destinations
-func (cr *CanaryRecorder) SetWeight(cd *flaggerv1.Canary, primary int, canary int) {
+func (cr *Recorder) SetWeight(cd *flaggerv1.Canary, primary int, canary int) {
	cr.weight.WithLabelValues(fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name), cd.Namespace).Set(float64(primary))
	cr.weight.WithLabelValues(cd.Spec.TargetRef.Name, cd.Namespace).Set(float64(canary))
}
|
||||
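For orientation, a minimal usage sketch of the renamed recorder; the "flagger" subsystem string, the version and provider values, and the cd canary object (construction elided) are illustrative assumptions, not taken from this diff:

    // hypothetical wiring of the metrics recorder after this change
    recorder := metrics.NewRecorder("flagger", true) // registers all five collectors
    recorder.SetInfo("0.10.0", "istio")              // e.g. flagger_info{version="0.10.0",mesh_provider="istio"} 1
    recorder.SetTotal("test", 1)                     // e.g. flagger_canary_total{namespace="test"} 1
    recorder.SetWeight(cd, 90, 10)                   // <target>-primary gauge 90, <target> gauge 10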
@@ -1,23 +1,29 @@
 package router

 import (
+    "context"
+    "strings"
+
     clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
     "go.uber.org/zap"
     "k8s.io/client-go/kubernetes"
+    restclient "k8s.io/client-go/rest"
 )

 type Factory struct {
+    kubeConfig    *restclient.Config
     kubeClient    kubernetes.Interface
     meshClient    clientset.Interface
     flaggerClient clientset.Interface
     logger        *zap.SugaredLogger
 }

-func NewFactory(kubeClient kubernetes.Interface,
+func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
     flaggerClient clientset.Interface,
     logger *zap.SugaredLogger,
     meshClient clientset.Interface) *Factory {
     return &Factory{
+        kubeConfig:    kubeConfig,
         meshClient:    meshClient,
         kubeClient:    kubeClient,
         flaggerClient: flaggerClient,
@@ -26,28 +32,42 @@ func NewFactory(kubeClient kubernetes.Interface,
 }

 // KubernetesRouter returns a ClusterIP service router
-func (factory *Factory) KubernetesRouter() *KubernetesRouter {
+func (factory *Factory) KubernetesRouter(label string) *KubernetesRouter {
     return &KubernetesRouter{
         logger:        factory.logger,
         flaggerClient: factory.flaggerClient,
         kubeClient:    factory.kubeClient,
+        label:         label,
     }
 }

-// MeshRouter returns a service mesh router (Istio or AppMesh)
+// MeshRouter returns a service mesh router
 func (factory *Factory) MeshRouter(provider string) Interface {
-    if provider == "appmesh" {
+    switch {
+    case provider == "nginx":
+        return &IngressRouter{
+            logger:     factory.logger,
+            kubeClient: factory.kubeClient,
+        }
+    case provider == "appmesh":
         return &AppMeshRouter{
             logger:        factory.logger,
             flaggerClient: factory.flaggerClient,
             kubeClient:    factory.kubeClient,
             appmeshClient: factory.meshClient,
         }
-    }
-    return &IstioRouter{
-        logger:        factory.logger,
-        flaggerClient: factory.flaggerClient,
-        kubeClient:    factory.kubeClient,
-        istioClient:   factory.meshClient,
+    case strings.HasPrefix(provider, "supergloo"):
+        supergloo, err := NewSuperglooRouter(context.TODO(), provider, factory.flaggerClient, factory.logger, factory.kubeConfig)
+        if err != nil {
+            panic("failed creating supergloo client")
+        }
+        return supergloo
+    default:
+        return &IstioRouter{
+            logger:        factory.logger,
+            flaggerClient: factory.flaggerClient,
+            kubeClient:    factory.kubeClient,
+            istioClient:   factory.meshClient,
+        }
     }
 }
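A hypothetical call site, to make the new dispatch explicit; the clients, config, and logger are assumed to exist, and the "supergloo:mesh" string is only an illustration of a provider value matching the prefix check above:

    factory := router.NewFactory(kubeConfig, kubeClient, flaggerClient, logger, meshClient)
    _ = factory.MeshRouter("nginx")          // *IngressRouter
    _ = factory.MeshRouter("appmesh")        // *AppMeshRouter
    _ = factory.MeshRouter("supergloo:mesh") // *SuperglooRouter; panics if the client cannot be built
    _ = factory.MeshRouter("anything-else")  // *IstioRouter (default branch)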
pkg/router/ingress.go (new file, 231 lines)
@@ -0,0 +1,231 @@
package router

import (
    "fmt"
    "github.com/google/go-cmp/cmp"
    flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
    "go.uber.org/zap"
    "k8s.io/api/extensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/kubernetes"
    "strconv"
    "strings"
)

type IngressRouter struct {
    kubeClient kubernetes.Interface
    logger     *zap.SugaredLogger
}

func (i *IngressRouter) Reconcile(canary *flaggerv1.Canary) error {
    if canary.Spec.IngressRef == nil || canary.Spec.IngressRef.Name == "" {
        return fmt.Errorf("ingress selector is empty")
    }

    targetName := canary.Spec.TargetRef.Name
    canaryName := fmt.Sprintf("%s-canary", targetName)
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)

    ingress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canary.Spec.IngressRef.Name, metav1.GetOptions{})
    if err != nil {
        return err
    }

    ingressClone := ingress.DeepCopy()

    // change backend to <deployment-name>-canary
    backendExists := false
    for k, v := range ingressClone.Spec.Rules {
        for x, y := range v.HTTP.Paths {
            if y.Backend.ServiceName == targetName {
                ingressClone.Spec.Rules[k].HTTP.Paths[x].Backend.ServiceName = canaryName
                backendExists = true
                break
            }
        }
    }

    if !backendExists {
        return fmt.Errorf("backend %s not found in ingress %s", targetName, canary.Spec.IngressRef.Name)
    }

    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})

    if errors.IsNotFound(err) {
        ing := &v1beta1.Ingress{
            ObjectMeta: metav1.ObjectMeta{
                Name:      canaryIngressName,
                Namespace: canary.Namespace,
                OwnerReferences: []metav1.OwnerReference{
                    *metav1.NewControllerRef(canary, schema.GroupVersionKind{
                        Group:   flaggerv1.SchemeGroupVersion.Group,
                        Version: flaggerv1.SchemeGroupVersion.Version,
                        Kind:    flaggerv1.CanaryKind,
                    }),
                },
                Annotations: i.makeAnnotations(ingressClone.Annotations),
                Labels:      ingressClone.Labels,
            },
            Spec: ingressClone.Spec,
        }

        _, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Create(ing)
        if err != nil {
            return err
        }

        i.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
            Infof("Ingress %s.%s created", ing.GetName(), canary.Namespace)
        return nil
    }

    if err != nil {
        return fmt.Errorf("ingress %s query error %v", canaryIngressName, err)
    }

    if diff := cmp.Diff(ingressClone.Spec, canaryIngress.Spec); diff != "" {
        iClone := canaryIngress.DeepCopy()
        iClone.Spec = ingressClone.Spec

        _, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Update(iClone)
        if err != nil {
            return fmt.Errorf("ingress %s update error %v", canaryIngressName, err)
        }

        i.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).
            Infof("Ingress %s updated", canaryIngressName)
    }

    return nil
}
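Net effect of Reconcile: for a spec.ingressRef.name of X, Flagger maintains an X-canary ingress owned by the Canary resource, with the tracked backend rewritten from the target service to its -canary service and all NGINX canary annotations reset to a disabled state (canary: "false", canary-weight: "0"); spec drift is detected via cmp.Diff and corrected on the next pass.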
func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (
    primaryWeight int,
    canaryWeight int,
    err error,
) {
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)
    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})
    if err != nil {
        return 0, 0, err
    }

    // A/B testing
    if len(canary.Spec.CanaryAnalysis.Match) > 0 {
        for k := range canaryIngress.Annotations {
            if k == "nginx.ingress.kubernetes.io/canary-by-cookie" || k == "nginx.ingress.kubernetes.io/canary-by-header" {
                return 0, 100, nil
            }
        }
    }

    // Canary
    for k, v := range canaryIngress.Annotations {
        if k == "nginx.ingress.kubernetes.io/canary-weight" {
            val, err := strconv.Atoi(v)
            if err != nil {
                return 0, 0, err
            }

            canaryWeight = val
            break
        }
    }

    primaryWeight = 100 - canaryWeight
    return
}
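For example, a canary ingress annotated with nginx.ingress.kubernetes.io/canary-weight: "30" yields (primaryWeight, canaryWeight) = (70, 30); when the analysis defines header or cookie matches and a canary-by-header or canary-by-cookie annotation is present, GetRoutes reports (0, 100), since NGINX then sends all matching traffic to the canary regardless of weight.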
func (i *IngressRouter) SetRoutes(
    canary *flaggerv1.Canary,
    primaryWeight int,
    canaryWeight int,
) error {
    canaryIngressName := fmt.Sprintf("%s-canary", canary.Spec.IngressRef.Name)
    canaryIngress, err := i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Get(canaryIngressName, metav1.GetOptions{})
    if err != nil {
        return err
    }

    iClone := canaryIngress.DeepCopy()

    // A/B testing
    if len(canary.Spec.CanaryAnalysis.Match) > 0 {
        cookie := ""
        header := ""
        headerValue := ""
        for _, m := range canary.Spec.CanaryAnalysis.Match {
            for k, v := range m.Headers {
                if k == "cookie" {
                    cookie = v.Exact
                } else {
                    header = k
                    headerValue = v.Exact
                }
            }
        }

        iClone.Annotations = i.makeHeaderAnnotations(iClone.Annotations, header, headerValue, cookie)
    } else {
        // canary
        iClone.Annotations["nginx.ingress.kubernetes.io/canary-weight"] = fmt.Sprintf("%v", canaryWeight)
    }

    // toggle canary
    if canaryWeight > 0 {
        iClone.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
    } else {
        iClone.Annotations = i.makeAnnotations(iClone.Annotations)
    }

    _, err = i.kubeClient.ExtensionsV1beta1().Ingresses(canary.Namespace).Update(iClone)
    if err != nil {
        return fmt.Errorf("ingress %s update error %v", canaryIngressName, err)
    }

    return nil
}

func (i *IngressRouter) makeAnnotations(annotations map[string]string) map[string]string {
    res := make(map[string]string)
    for k, v := range annotations {
        if !strings.Contains(k, "nginx.ingress.kubernetes.io/canary") &&
            !strings.Contains(k, "kubectl.kubernetes.io/last-applied-configuration") {
            res[k] = v
        }
    }

    res["nginx.ingress.kubernetes.io/canary"] = "false"
    res["nginx.ingress.kubernetes.io/canary-weight"] = "0"

    return res
}
func (i *IngressRouter) makeHeaderAnnotations(annotations map[string]string,
    header string, headerValue string, cookie string) map[string]string {
    res := make(map[string]string)
    // drop any pre-existing canary annotations (filter on the annotation key)
    for k, v := range annotations {
        if !strings.Contains(k, "nginx.ingress.kubernetes.io/canary") {
            res[k] = v
        }
    }

    res["nginx.ingress.kubernetes.io/canary"] = "true"
    res["nginx.ingress.kubernetes.io/canary-weight"] = "0"

    if cookie != "" {
        res["nginx.ingress.kubernetes.io/canary-by-cookie"] = cookie
    }

    if header != "" {
        res["nginx.ingress.kubernetes.io/canary-by-header"] = header
    }

    if headerValue != "" {
        res["nginx.ingress.kubernetes.io/canary-by-header-value"] = headerValue
    }

    return res
}
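To make the builder concrete: a hypothetical A/B analysis matching on a cookie with exact value "canary" plus an x-canary header with exact value "insider" (both names invented for illustration) would, per the code above, leave the canary ingress with:

    nginx.ingress.kubernetes.io/canary: "true"
    nginx.ingress.kubernetes.io/canary-weight: "0"
    nginx.ingress.kubernetes.io/canary-by-cookie: "canary"
    nginx.ingress.kubernetes.io/canary-by-header: "x-canary"
    nginx.ingress.kubernetes.io/canary-by-header-value: "insider"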
pkg/router/ingress_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package router

import (
    "fmt"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "testing"
)

func TestIngressRouter_Reconcile(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:     mocks.logger,
        kubeClient: mocks.kubeClient,
    }

    err := router.Reconcile(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    canaryAn := "nginx.ingress.kubernetes.io/canary"
    canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    if _, ok := inCanary.Annotations[canaryAn]; !ok {
        t.Errorf("Canary annotation missing")
    }

    // test initialisation
    if inCanary.Annotations[canaryAn] != "false" {
        t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "0" {
        t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
    }
}

func TestIngressRouter_GetSetRoutes(t *testing.T) {
    mocks := setupfakeClients()
    router := &IngressRouter{
        logger:     mocks.logger,
        kubeClient: mocks.kubeClient,
    }

    err := router.Reconcile(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p, c, err := router.GetRoutes(mocks.ingressCanary)
    if err != nil {
        t.Fatal(err.Error())
    }

    p = 50
    c = 50

    err = router.SetRoutes(mocks.ingressCanary, p, c)
    if err != nil {
        t.Fatal(err.Error())
    }

    canaryAn := "nginx.ingress.kubernetes.io/canary"
    canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"

    canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
    inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    if _, ok := inCanary.Annotations[canaryAn]; !ok {
        t.Errorf("Canary annotation missing")
    }

    // test rollout
    if inCanary.Annotations[canaryAn] != "true" {
        t.Errorf("Got canary annotation %v wanted true", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "50" {
        t.Errorf("Got canary weight annotation %v wanted 50", inCanary.Annotations[canaryWeightAn])
    }

    p = 100
    c = 0

    err = router.SetRoutes(mocks.ingressCanary, p, c)
    if err != nil {
        t.Fatal(err.Error())
    }

    inCanary, err = router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
    if err != nil {
        t.Fatal(err.Error())
    }

    // test promotion
    if inCanary.Annotations[canaryAn] != "false" {
        t.Errorf("Got canary annotation %v wanted false", inCanary.Annotations[canaryAn])
    }

    if inCanary.Annotations[canaryWeightAn] != "0" {
        t.Errorf("Got canary weight annotation %v wanted 0", inCanary.Annotations[canaryWeightAn])
    }
}
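Assuming a standard Go checkout of the repository, the new router tests can be run in isolation with the usual tooling (package path taken from this diff):

    go test ./pkg/router/ -run TestIngressRouter -v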
@@ -32,7 +32,7 @@ func (ir *IstioRouter) Reconcile(canary *flaggerv1.Canary) error {
     hosts := canary.Spec.Service.Hosts
     var hasServiceHost bool
     for _, h := range hosts {
-        if h == targetName {
+        if h == targetName || h == "*" {
             hasServiceHost = true
             break
         }
@@ -50,7 +50,9 @@ func (ir *IstioRouter) Reconcile(canary *flaggerv1.Canary) error {
             break
         }
     }
-    if !hasMeshGateway {
+
+    // set default mesh gateway if no gateway is specified
+    if !hasMeshGateway && len(canary.Spec.Service.Gateways) == 0 {
         gateways = append(gateways, "mesh")
     }
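Behavioural note on the two Istio hunks: a wildcard "*" host now satisfies the service-host check, and the implicit "mesh" gateway is appended only when the Canary lists no gateways at all, so a spec that deliberately names just a public ingress gateway is no longer forced onto the in-mesh gateway as well.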
@@ -19,6 +19,7 @@ type KubernetesRouter struct {
     kubeClient    kubernetes.Interface
     flaggerClient clientset.Interface
     logger        *zap.SugaredLogger
+    label         string
 }

 // Reconcile creates or updates the primary and canary services
@@ -64,7 +65,7 @@ func (c *KubernetesRouter) reconcileService(canary *flaggerv1.Canary, name strin
     svcSpec := corev1.ServiceSpec{
         Type:     corev1.ServiceTypeClusterIP,
-        Selector: map[string]string{"app": target},
+        Selector: map[string]string{c.label: target},
         Ports: []corev1.ServicePort{
             {
                 Name: portName,
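The selector key is now injected rather than hard-coded, pairing with the KubernetesRouter(label string) factory change above. A hedged sketch, with the label values purely illustrative:

    factory.KubernetesRouter("app")  // generated services select {"app": target}, the previous behaviour
    factory.KubernetesRouter("name") // generated services select {"name": target} instead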
@@ -6,12 +6,14 @@ import (
     istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3"
     clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
     fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
-    "github.com/weaveworks/flagger/pkg/logging"
+    "github.com/weaveworks/flagger/pkg/logger"
     "go.uber.org/zap"
     appsv1 "k8s.io/api/apps/v1"
     hpav1 "k8s.io/api/autoscaling/v1"
     corev1 "k8s.io/api/core/v1"
+    "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/fake"
 )
@@ -20,6 +22,7 @@ type fakeClients struct {
     canary        *v1alpha3.Canary
     abtest        *v1alpha3.Canary
     appmeshCanary *v1alpha3.Canary
+    ingressCanary *v1alpha3.Canary
     kubeClient    kubernetes.Interface
     meshClient    clientset.Interface
     flaggerClient clientset.Interface
@@ -30,17 +33,19 @@ func setupfakeClients() fakeClients {
     canary := newMockCanary()
     abtest := newMockABTest()
     appmeshCanary := newMockCanaryAppMesh()
-    flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary)
+    ingressCanary := newMockCanaryIngress()
+    flaggerClient := fakeFlagger.NewSimpleClientset(canary, abtest, appmeshCanary, ingressCanary)

-    kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment())
+    kubeClient := fake.NewSimpleClientset(newMockDeployment(), newMockABTestDeployment(), newMockIngress())

     meshClient := fakeFlagger.NewSimpleClientset()
-    logger, _ := logging.NewLogger("debug")
+    logger, _ := logger.NewLogger("debug")

     return fakeClients{
         canary:        canary,
         abtest:        abtest,
         appmeshCanary: appmeshCanary,
+        ingressCanary: ingressCanary,
         kubeClient:    kubeClient,
         meshClient:    meshClient,
         flaggerClient: flaggerClient,
@@ -266,3 +271,73 @@ func newMockABTestDeployment() *appsv1.Deployment {

     return d
 }
+
+func newMockCanaryIngress() *v1alpha3.Canary {
+    cd := &v1alpha3.Canary{
+        TypeMeta: metav1.TypeMeta{APIVersion: v1alpha3.SchemeGroupVersion.String()},
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: "default",
+            Name:      "nginx",
+        },
+        Spec: v1alpha3.CanarySpec{
+            TargetRef: hpav1.CrossVersionObjectReference{
+                Name:       "podinfo",
+                APIVersion: "apps/v1",
+                Kind:       "Deployment",
+            },
+            IngressRef: &hpav1.CrossVersionObjectReference{
+                Name:       "podinfo",
+                APIVersion: "extensions/v1beta1",
+                Kind:       "Ingress",
+            },
+            Service: v1alpha3.CanaryService{
+                Port: 9898,
+            },
+            CanaryAnalysis: v1alpha3.CanaryAnalysis{
+                Threshold:  10,
+                StepWeight: 10,
+                MaxWeight:  50,
+                Metrics: []v1alpha3.CanaryMetric{
+                    {
+                        Name:      "request-success-rate",
+                        Threshold: 99,
+                        Interval:  "1m",
+                    },
+                },
+            },
+        },
+    }
+    return cd
+}
+
+func newMockIngress() *v1beta1.Ingress {
+    return &v1beta1.Ingress{
+        TypeMeta: metav1.TypeMeta{APIVersion: v1beta1.SchemeGroupVersion.String()},
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: "default",
+            Name:      "podinfo",
+            Annotations: map[string]string{
+                "kubernetes.io/ingress.class": "nginx",
+            },
+        },
+        Spec: v1beta1.IngressSpec{
+            Rules: []v1beta1.IngressRule{
+                {
+                    Host: "app.example.com",
+                    IngressRuleValue: v1beta1.IngressRuleValue{
+                        HTTP: &v1beta1.HTTPIngressRuleValue{
+                            Paths: []v1beta1.HTTPIngressPath{
+                                {
+                                    Path: "/",
+                                    Backend: v1beta1.IngressBackend{
+                                        ServiceName: "podinfo",
+                                        ServicePort: intstr.FromInt(9898),
+                                    },
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+}
Some files were not shown because too many files have changed in this diff.