Mirror of https://github.com/fluxcd/flagger.git (synced 2026-02-15 02:20:22 +00:00)

Compare commits: 29 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 4463d41744 |  |
|  | 66663c2aaf |  |
|  | d34a504516 |  |
|  | 9fc5f9ca31 |  |
|  | 3cf9fe7cb7 |  |
|  | 6d57b5c490 |  |
|  | 7254a5a7f6 |  |
|  | 671016834d |  |
|  | 41dc056c87 |  |
|  | 6757553685 |  |
|  | 1abceb8334 |  |
|  | 0939a1bcc9 |  |
|  | d91012d79c |  |
|  | 6e9fe2c584 |  |
|  | ccad05c92f |  |
|  | 8b9929008b |  |
|  | a881446252 |  |
|  | 183c73f2c4 |  |
|  | 3e3eaebbf2 |  |
|  | bcc5bbf6fa |  |
|  | 42c3052247 |  |
|  | 3593e20ad5 |  |
|  | 1f0819465b |  |
|  | 7e48d7537f |  |
|  | 45c69ce3f7 |  |
|  | 8699a07de9 |  |
|  | fb92d31744 |  |
|  | bb7c4b1909 |  |
|  | 84d74d1eb5 |  |
@@ -1,247 +0,0 @@
version: 2.1
jobs:

  build-binary:
    docker:
      - image: circleci/golang:1.13
    working_directory: ~/build
    steps:
      - checkout
      - restore_cache:
          keys:
            - go-mod-v3-{{ checksum "go.sum" }}
      - run:
          name: Run go mod download
          command: go mod download
      - run:
          name: Run go fmt
          command: make test-fmt
      - run:
          name: Build Flagger
          command: |
            CGO_ENABLED=0 GOOS=linux go build \
            -ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=${CIRCLE_SHA1}" \
            -a -installsuffix cgo -o bin/flagger ./cmd/flagger/*.go
      - run:
          name: Build Flagger load tester
          command: |
            CGO_ENABLED=0 GOOS=linux go build \
            -a -installsuffix cgo -o bin/loadtester ./cmd/loadtester/*.go
      - run:
          name: Run unit tests
          command: |
            go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
            bash <(curl -s https://codecov.io/bash)
      - run:
          name: Verify code gen
          command: make test-codegen
      - save_cache:
          key: go-mod-v3-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod/"
      - persist_to_workspace:
          root: bin
          paths:
            - flagger
            - loadtester

  push-container:
    docker:
      - image: circleci/golang:1.13
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/container-push.sh

  push-binary:
    docker:
      - image: circleci/golang:1.13
    working_directory: ~/build
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - restore_cache:
          keys:
            - go-mod-v3-{{ checksum "go.sum" }}
      - run: test/goreleaser.sh

  e2e-istio-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-istio.sh
      - run: test/e2e-tests.sh

  e2e-kubernetes-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-kubernetes.sh
      - run: test/e2e-kubernetes-tests.sh

  e2e-smi-istio-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-smi-istio.sh
      - run: test/e2e-tests.sh canary

  e2e-gloo-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-gloo.sh
      - run: test/e2e-gloo-tests.sh

  e2e-nginx-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-nginx.sh
      - run: test/e2e-nginx-tests.sh
      - run: test/e2e-nginx-cleanup.sh
      - run: test/e2e-nginx-custom-annotations.sh
      - run: test/e2e-nginx-tests.sh

  e2e-linkerd-testing:
    machine: true
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
      - run: test/e2e-kind.sh
      - run: test/e2e-linkerd.sh
      - run: test/e2e-linkerd-tests.sh

  push-helm-charts:
    docker:
      - image: circleci/golang:1.13
    steps:
      - checkout
      - run:
          name: Install kubectl
          command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
      - run:
          name: Install helm
          command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
      - run:
          name: Initialize helm
          command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
      - run:
          name: Lint charts
          command: |
            helm lint ./charts/*
      - run:
          name: Package charts
          command: |
            mkdir $HOME/charts
            helm package ./charts/* --destination $HOME/charts
      - run:
          name: Publish charts
          command: |
            if echo "${CIRCLE_TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
              REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
              git config user.email weaveworksbot@users.noreply.github.com
              git config user.name weaveworksbot
              git remote set-url origin ${REPOSITORY}
              git checkout gh-pages
              mv -f $HOME/charts/*.tgz .
              helm repo index . --url https://flagger.app
              git add .
              git commit -m "Publish Helm charts v${CIRCLE_TAG}"
              git push origin gh-pages
            else
              echo "Not a release! Skip charts publish"
            fi

workflows:
  version: 2
  build-test-push:
    jobs:
      - build-binary:
          filters:
            branches:
              ignore:
                - gh-pages
      - e2e-istio-testing:
          requires:
            - build-binary
      - e2e-kubernetes-testing:
          requires:
            - build-binary
      - e2e-gloo-testing:
          requires:
            - build-binary
      - e2e-nginx-testing:
          requires:
            - build-binary
      - e2e-linkerd-testing:
          requires:
            - build-binary
      - push-container:
          requires:
            - build-binary
            - e2e-istio-testing
            - e2e-kubernetes-testing
            - e2e-gloo-testing
            - e2e-nginx-testing
            - e2e-linkerd-testing

  release:
    jobs:
      - build-binary:
          filters:
            branches:
              ignore: /.*/
            tags:
              ignore: /^chart.*/
      - push-container:
          requires:
            - build-binary
          filters:
            branches:
              ignore: /.*/
            tags:
              ignore: /^chart.*/
      - push-binary:
          requires:
            - push-container
          filters:
            branches:
              ignore: /.*/
            tags:
              ignore: /^chart.*/
      - push-helm-charts:
          requires:
            - push-container
          filters:
            branches:
              ignore: /.*/
            tags:
              ignore: /^chart.*/
.codecov.yml
@@ -1,11 +0,0 @@
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 50
        base: auto
    patch: off

comment:
  require_changes: yes
@@ -1 +0,0 @@
root: ./docs/gitbook
.github/CODEOWNERS
@@ -1 +0,0 @@
* @stefanprodan
.github/_main.workflow
@@ -1,17 +0,0 @@
workflow "Publish Helm charts" {
  on = "push"
  resolves = ["helm-push"]
}

action "helm-lint" {
  uses = "stefanprodan/gh-actions/helm@master"
  args = ["lint charts/*"]
}

action "helm-push" {
  needs = ["helm-lint"]
  uses = "stefanprodan/gh-actions/helm-gh-pages@master"
  args = ["charts/*","https://flagger.app"]
  secrets = ["GITHUB_TOKEN"]
}
.github/workflows/website.yaml
@@ -0,0 +1,53 @@
name: website

on:
  push:
    branches:
      - website

jobs:
  vuepress:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - uses: actions/setup-node@v5
        with:
          node-version: '12.x'
      - id: yarn-cache-dir-path
        run: echo "::set-output name=dir::$(yarn cache dir)"
      - uses: actions/cache@v4
        id: yarn-cache
        with:
          path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-yarn-
      - name: Build
        run: |
          yarn add -D vuepress
          yarn docs:build
          mkdir $HOME/site
          rsync -avzh docs/.vuepress/dist/ $HOME/site
      - name: Publish
        env:
          REPOSITORY: "https://x-access-token:${{ secrets.BOT_GITHUB_TOKEN }}@github.com/fluxcd/flagger.git"
        run: |
          tmpDir=$(mktemp -d)
          pushd $tmpDir >& /dev/null

          git clone ${REPOSITORY}
          cd flagger
          git config user.name ${{ github.actor }}
          git config user.email ${{ github.actor }}@users.noreply.github.com
          git remote set-url origin ${REPOSITORY}
          git checkout gh-pages
          rm -rf assets
          rsync -avzh $HOME/site/ .
          rm -rf node_modules
          git add .
          git commit -m "Publish website"
          git push origin gh-pages

          popd >& /dev/null
          rm -rf $tmpDir
.gitignore
@@ -1,19 +1,65 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Test binary, build with `go test -c`
*.test
# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# next.js build output
.next

bin/
_tmp/

artifacts/gcloud/
.idea
docs/.vuepress/dist/
Makefile.dev
@@ -1,18 +0,0 @@
builds:
  - main: ./cmd/flagger
    binary: flagger
    ldflags: -s -w -X github.com/weaveworks/flagger/pkg/version.REVISION={{.Commit}}
    goos:
      - linux
    goarch:
      - amd64
    env:
      - CGO_ENABLED=0
archives:
  - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
    files:
      - none*
changelog:
  filters:
    exclude:
      - '^CircleCI'
CHANGELOG.md
@@ -1,516 +0,0 @@
# Changelog

All notable changes to this project are documented in this file.

## 0.20.2 (2019-11-07)

Adds support for exposing canaries outside the cluster using App Mesh Gateway annotations

#### Improvements

- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/weaveworks/flagger/pull/358)

#### Fixes

- Use the specified replicas when scaling up the canary [#363](https://github.com/weaveworks/flagger/pull/363)

## 0.20.1 (2019-11-03)

Fixes promql execution and updates the load testing tools

#### Improvements

- Update load tester Helm tools [#8349dd1](https://github.com/weaveworks/flagger/commit/8349dd1cda59a741c7bed9a0f67c0fc0fbff4635)
- e2e testing: update providers [#346](https://github.com/weaveworks/flagger/pull/346)

#### Fixes

- Fix Prometheus query escape [#353](https://github.com/weaveworks/flagger/pull/353)
- Updating hey release link [#350](https://github.com/weaveworks/flagger/pull/350)

## 0.20.0 (2019-10-21)

Adds support for [A/B Testing](https://docs.flagger.app/usage/progressive-delivery#traffic-mirroring) and retry policies when using App Mesh

#### Features

- Implement App Mesh A/B testing based on HTTP headers match conditions [#340](https://github.com/weaveworks/flagger/pull/340)
- Implement App Mesh HTTP retry policy [#338](https://github.com/weaveworks/flagger/pull/338)
- Implement metrics server override [#342](https://github.com/weaveworks/flagger/pull/342)

#### Improvements

- Add the app/name label to services and primary deployment [#333](https://github.com/weaveworks/flagger/pull/333)
- Allow setting Slack and Teams URLs with env vars [#334](https://github.com/weaveworks/flagger/pull/334)
- Refactor Gloo integration [#344](https://github.com/weaveworks/flagger/pull/344)

#### Fixes

- Generate unique names for App Mesh virtual routers and routes [#336](https://github.com/weaveworks/flagger/pull/336)

## 0.19.0 (2019-10-08)

Adds support for canary and blue/green [traffic mirroring](https://docs.flagger.app/usage/progressive-delivery#traffic-mirroring)

#### Features

- Add traffic mirroring for Istio service mesh [#311](https://github.com/weaveworks/flagger/pull/311)
- Implement canary service target port [#327](https://github.com/weaveworks/flagger/pull/327)

#### Improvements

- Allow gRPC protocol for App Mesh [#325](https://github.com/weaveworks/flagger/pull/325)
- Enforce blue/green when using Kubernetes networking [#326](https://github.com/weaveworks/flagger/pull/326)

#### Fixes

- Fix port discovery diff [#324](https://github.com/weaveworks/flagger/pull/324)
- Helm chart: Enable Prometheus scraping of Flagger metrics [#2141d88](https://github.com/weaveworks/flagger/commit/2141d88ce1cc6be220dab34171c215a334ecde24)

## 0.18.6 (2019-10-03)

Adds support for App Mesh conformance tests and latency metric checks

#### Improvements

- Add support for acceptance testing when using App Mesh [#322](https://github.com/weaveworks/flagger/pull/322)
- Add Kustomize installer for App Mesh [#310](https://github.com/weaveworks/flagger/pull/310)
- Update Linkerd to v2.5.0 and Prometheus to v2.12.0 [#323](https://github.com/weaveworks/flagger/pull/323)

#### Fixes

- Fix slack/teams notification fields mapping [#318](https://github.com/weaveworks/flagger/pull/318)

## 0.18.5 (2019-10-02)

Adds support for [confirm-promotion](https://docs.flagger.app/how-it-works#webhooks) webhooks and blue/green deployments when using a service mesh

#### Features

- Implement confirm-promotion hook [#307](https://github.com/weaveworks/flagger/pull/307)
- Implement B/G for service mesh providers [#305](https://github.com/weaveworks/flagger/pull/305)

#### Improvements

- Canary promotion improvements to avoid dropping in-flight requests [#310](https://github.com/weaveworks/flagger/pull/310)
- Update end-to-end tests to Kubernetes v1.15.3 and Istio 1.3.0 [#306](https://github.com/weaveworks/flagger/pull/306)

#### Fixes

- Skip primary check for App Mesh [#315](https://github.com/weaveworks/flagger/pull/315)

## 0.18.4 (2019-09-08)

Adds support for NGINX custom annotations and Helm v3 acceptance testing

#### Features

- Add annotations prefix for NGINX ingresses [#293](https://github.com/weaveworks/flagger/pull/293)
- Add wide columns in CRD [#289](https://github.com/weaveworks/flagger/pull/289)
- loadtester: implement Helm v3 test command [#296](https://github.com/weaveworks/flagger/pull/296)
- loadtester: add gRPC health check to load tester image [#295](https://github.com/weaveworks/flagger/pull/295)

#### Fixes

- loadtester: fix tests error logging [#286](https://github.com/weaveworks/flagger/pull/286)

## 0.18.3 (2019-08-22)

Adds support for tillerless helm tests and protobuf health checking

#### Features

- loadtester: add support for tillerless helm [#280](https://github.com/weaveworks/flagger/pull/280)
- loadtester: add support for protobuf health checking [#280](https://github.com/weaveworks/flagger/pull/280)

#### Improvements

- Set HTTP listeners for AppMesh virtual routers [#272](https://github.com/weaveworks/flagger/pull/272)

#### Fixes

- Add missing fields to CRD validation spec [#271](https://github.com/weaveworks/flagger/pull/271)
- Fix App Mesh backends validation in CRD [#281](https://github.com/weaveworks/flagger/pull/281)

## 0.18.2 (2019-08-05)

Fixes multi-port support for Istio

#### Fixes

- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)

#### Improvements

- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)

## 0.18.1 (2019-07-30)

Fixes Blue/Green style deployments for Kubernetes and Linkerd providers

#### Fixes

- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)

## 0.18.0 (2019-07-29)

Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis

#### Features

- Implement confirm rollout gate, hook and API [#251](https://github.com/weaveworks/flagger/pull/251)

#### Improvements

- Refactor canary change detection and status [#240](https://github.com/weaveworks/flagger/pull/240)
- Implement finalising state [#257](https://github.com/weaveworks/flagger/pull/257)
- Add gRPC load testing tool [#248](https://github.com/weaveworks/flagger/pull/248)

#### Breaking changes

- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries' status phase will be reset to `Initialized`
- Upgrading Flagger with Helm will fail due to Helm's poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)

## 0.17.0 (2019-07-08)

Adds support for Linkerd (SMI Traffic Split API), MS Teams notifications and HA mode with leader election

#### Features

- Add Linkerd support [#230](https://github.com/weaveworks/flagger/pull/230)
- Implement MS Teams notifications [#235](https://github.com/weaveworks/flagger/pull/235)
- Implement leader election [#236](https://github.com/weaveworks/flagger/pull/236)

#### Improvements

- Add [Kustomize](https://docs.flagger.app/install/flagger-install-on-kubernetes#install-flagger-with-kustomize) installer [#232](https://github.com/weaveworks/flagger/pull/232)
- Add Pod Security Policy to Helm chart [#234](https://github.com/weaveworks/flagger/pull/234)

## 0.16.0 (2019-06-23)

Adds support for running [Blue/Green deployments](https://docs.flagger.app/usage/blue-green) without a service mesh or ingress controller

#### Features

- Allow blue/green deployments without a service mesh provider [#211](https://github.com/weaveworks/flagger/pull/211)
- Add the service mesh provider to the canary spec [#217](https://github.com/weaveworks/flagger/pull/217)
- Allow multi-port services and implement port discovery [#207](https://github.com/weaveworks/flagger/pull/207)

#### Improvements

- Add [FAQ page](https://docs.flagger.app/faq) to docs website
- Switch to go modules in CI [#218](https://github.com/weaveworks/flagger/pull/218)
- Update e2e testing to Kubernetes Kind 0.3.0 and Istio 1.2.0

#### Fixes

- Update the primary HPA on canary promotion [#216](https://github.com/weaveworks/flagger/pull/216)

## 0.15.0 (2019-06-12)

Adds support for customising the Istio [traffic policy](https://docs.flagger.app/how-it-works#istio-routing) in the canary service spec

#### Features

- Generate Istio destination rules and allow traffic policy customisation [#200](https://github.com/weaveworks/flagger/pull/200)

#### Improvements

- Update Kubernetes packages to 1.14 and use go modules instead of dep [#202](https://github.com/weaveworks/flagger/pull/202)

## 0.14.1 (2019-06-05)

Adds support for running [acceptance/integration tests](https://docs.flagger.app/how-it-works#integration-testing) with Helm test or Bash Bats using pre-rollout hooks

#### Features

- Implement Helm and Bash pre-rollout hooks [#196](https://github.com/weaveworks/flagger/pull/196)

#### Fixes

- Fix promoting canary when max weight is not a multiple of step [#190](https://github.com/weaveworks/flagger/pull/190)
- Add ability to set Prometheus url with custom path without trailing '/' [#197](https://github.com/weaveworks/flagger/pull/197)

## 0.14.0 (2019-05-21)

Adds support for Service Mesh Interface and [Gloo](https://docs.flagger.app/usage/gloo-progressive-delivery) ingress controller

#### Features

- Add support for SMI (Istio weighted traffic) [#180](https://github.com/weaveworks/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/weaveworks/flagger/pull/179)

## 0.13.2 (2019-04-11)

Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)

#### Fixes

- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)

#### Improvements

- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)

## 0.13.1 (2019-04-09)

Fixes for custom metrics checks and NGINX Prometheus queries

#### Fixes

- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)

## 0.13.0 (2019-04-08)

Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delivery) ingress controller

#### Features

- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/weaveworks/flagger/pull/170)
- Add Prometheus add-on to Flagger Helm chart for App Mesh and NGINX [79b3370](https://github.com/weaveworks/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)

#### Fixes

- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/weaveworks/flagger/pull/162)

## 0.12.0 (2019-04-29)

Adds support for [SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)

#### Features

- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/weaveworks/flagger/pull/151)

## 0.11.1 (2019-04-18)

Move Flagger and the load tester container images to Docker Hub

#### Features

- Add Bash Automated Testing System support to Flagger tester for running acceptance tests as pre-rollout hooks

## 0.11.0 (2019-04-17)

Adds pre/post rollout [webhooks](https://docs.flagger.app/how-it-works#webhooks)

#### Features

- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/weaveworks/flagger/pull/147)

#### Improvements

- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/weaveworks/flagger/pull/146)
- Make the pod selector label configurable [#148](https://github.com/weaveworks/flagger/pull/148)

#### Breaking changes

- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/weaveworks/flagger/pull/141)

## 0.10.0 (2019-03-27)

Adds support for App Mesh

#### Features

- AWS App Mesh integration
  [#107](https://github.com/weaveworks/flagger/pull/107)
  [#123](https://github.com/weaveworks/flagger/pull/123)

#### Improvements

- Reconcile Kubernetes ClusterIP services [#122](https://github.com/weaveworks/flagger/pull/122)

#### Fixes

- Preserve pod labels on canary promotion [#105](https://github.com/weaveworks/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/weaveworks/flagger/pull/121)

## 0.9.0 (2019-03-11)

Allows A/B testing scenarios where instead of weighted routing, the traffic is split between the
primary and canary based on HTTP headers or cookies.

#### Features

- A/B testing - canary with session affinity [#88](https://github.com/weaveworks/flagger/pull/88)

#### Fixes

- Update the analysis interval when the custom resource changes [#91](https://github.com/weaveworks/flagger/pull/91)

## 0.8.0 (2019-03-06)

Adds support for CORS policy and HTTP request headers manipulation

#### Features

- CORS policy support [#83](https://github.com/weaveworks/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/weaveworks/flagger/pull/82)

#### Improvements

- Refactor the routing management
  [#72](https://github.com/weaveworks/flagger/pull/72)
  [#80](https://github.com/weaveworks/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/weaveworks/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/weaveworks/flagger/pull/78)

## 0.7.0 (2019-02-28)

Adds support for custom metric checks, HTTP timeouts and HTTP retries

#### Features

- Allow custom promql queries in the canary analysis spec [#60](https://github.com/weaveworks/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/weaveworks/flagger/pull/62)

## 0.6.0 (2019-02-25)

Allows for [HTTPMatchRequests](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPMatchRequest)
and [HTTPRewrite](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#HTTPRewrite)
to be customized in the service spec of the canary custom resource.

#### Features

- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/weaveworks/flagger/pull/55)
- Update virtual service when the canary service spec changes
  [#54](https://github.com/weaveworks/flagger/pull/54)
  [#51](https://github.com/weaveworks/flagger/pull/51)

#### Improvements

- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
  [#53](https://github.com/weaveworks/flagger/pull/53)

## 0.5.1 (2019-02-14)

Allows skipping the analysis phase to ship changes directly to production

#### Features

- Add option to skip the canary analysis [#46](https://github.com/weaveworks/flagger/pull/46)

#### Fixes

- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/weaveworks/flagger/pull/43)

## 0.5.0 (2019-01-30)

Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flagger/pull/37)

#### Features

- Promote configmaps and secrets changes from canary to primary
- Detect changes in configmaps and/or secrets and (re)start canary analysis
- Add configs checksum to Canary CRD status
- Create primary configmaps and secrets at bootstrap
- Scan canary volumes and containers for configmaps and secrets

#### Fixes

- Copy deployment labels from canary to primary at bootstrap and promotion

## 0.4.1 (2019-01-24)

Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)

#### Features

- Add the load tester chart to Flagger Helm repository
- Implement a load test runner based on [rakyll/hey](https://github.com/rakyll/hey)
- Log warning when no values are found for Istio metric due to lack of traffic

#### Fixes

- Run webhooks before the metrics checks to avoid failures when using a load tester

## 0.4.0 (2019-01-18)

Restart canary analysis if revision changes [#31](https://github.com/weaveworks/flagger/pull/31)

#### Breaking changes

- Drop support for Kubernetes 1.10

#### Features

- Detect changes during canary analysis and reset advancement
- Add status and additional printer columns to CRD
- Add canary name and namespace to controller structured logs

#### Fixes

- Allow canary name to be different from the target name
- Check if multiple canaries have the same target and log error
- Use deep copy when updating Kubernetes objects
- Skip readiness checks if canary analysis has finished

## 0.3.0 (2019-01-11)

Configurable canary analysis duration [#20](https://github.com/weaveworks/flagger/pull/20)

#### Breaking changes

- Helm chart: flag `controlLoopInterval` has been removed

#### Features

- CRD: canaries.flagger.app v1alpha3
- Schedule canary analysis independently based on `canaryAnalysis.interval`
- Add analysis interval to Canary CRD (defaults to one minute)
- Make autoscaler (HPA) reference optional

## 0.2.0 (2019-01-04)

Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)

#### Features

- CRD: canaries.flagger.app v1alpha2
- Implement canary external checks based on webhooks HTTP POST calls
- Add webhooks to Canary CRD
- Move docs to gitbook [docs.flagger.app](https://docs.flagger.app)

## 0.1.2 (2018-12-06)

Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)

#### Features

- Add canary analysis metadata to init and start Slack messages
- Add rollback reason to failed canary Slack messages

## 0.1.1 (2018-11-28)

Canary progress deadline [#10](https://github.com/weaveworks/flagger/pull/10)

#### Features

- Rollback canary based on the deployment progress deadline check
- Add progress deadline to Canary CRD (defaults to 10 minutes)

## 0.1.0 (2018-11-25)

First stable release

#### Features

- CRD: canaries.flagger.app v1alpha1
- Notifications: post canary events to Slack
- Instrumentation: expose Prometheus metrics for canary status and traffic weight percentage
- Autoscaling: add HPA reference to CRD and create primary HPA at bootstrap
- Bootstrap: create primary deployment, ClusterIP services and Istio virtual service based on CRD spec

## 0.0.1 (2018-10-07)

Initial semver release

#### Features

- Implement canary rollback based on failed checks threshold
- Scale up the deployment when canary revision changes
- Add OpenAPI v3 schema validation to Canary CRD
- Use CRD status for canary state persistence
- Add Helm charts for Flagger and Grafana
- Add canary analysis Grafana dashboard
@@ -1,72 +0,0 @@
# How to Contribute

Flagger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.

We gratefully welcome improvements to documentation as well as to code.

## Certificate of Origin

By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.

## Chat

The project uses Slack: to join the conversation, simply join the
[Weave community](https://slack.weave.works/) Slack workspace.

## Getting Started

- Fork the repository on GitHub
- If you want to contribute as a developer, continue reading this document for further instructions
- If you have questions, concerns, get stuck or need a hand, let us know
  on the Slack channel. We are happy to help and look forward to having
  you as part of the team, in whatever capacity.
- Play with the project, submit bugs, submit pull requests!

## Contribution workflow

This is a rough outline of how to prepare a contribution:

- Create a topic branch from where you want to base your work (usually branched from master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- If you changed code:
  - add automated tests to cover your changes
- Submit a pull request to the original repository.

## Acceptance policy

These things will make a PR more likely to be accepted:

- a well-described requirement
- new code and tests follow the conventions in old code and tests
- a good commit message (see below)
- All code must abide by the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
- Names should abide by [What's in a name](https://talks.golang.org/2014/names.slide#1)
- Code must build on both Linux and Darwin, via plain `go build`
- Code should have appropriate test coverage and tests should be written
  to work with `go test`

In general, we will merge a PR once one maintainer has endorsed it.
For substantial changes, more people may become involved, and you might
get asked to resubmit the PR or divide the changes into more than one PR.

### Format of the Commit Message

For Flux we prefer the following rules for good commit messages:

- Limit the subject to 50 characters and write as the continuation
  of the sentence "If applied, this commit will ..."
- Explain what and why in the body, if more than a trivial change;
  wrap it at 72 characters.
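As an illustration only (the subject and body below are invented for this sketch, not a required template), a commit following both rules could be created like this:

```bash
# Subject: at most 50 characters, completes "If applied, this commit will ..."
# Body: explains what and why, wrapped at 72 characters
git commit \
  -m "Add gRPC health check to the load tester" \
  -m "The tester image only exposed an HTTP health endpoint, so gRPC
canaries could not be verified during pre-rollout checks. Probe the
standard grpc.health.v1 service instead."
```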

The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.

This doc is adapted from the [Weaveworks Flux](https://github.com/weaveworks/flux/blob/master/CONTRIBUTING.md) contributing guide.
Dockerfile
@@ -1,16 +0,0 @@
FROM alpine:3.10

RUN addgroup -S flagger \
    && adduser -S -g flagger flagger \
    && apk --no-cache add ca-certificates

WORKDIR /home/flagger

COPY /bin/flagger .

RUN chown -R flagger:flagger ./

USER flagger

ENTRYPOINT ["./flagger"]
@@ -1,44 +0,0 @@
FROM bats/bats:v1.1.0

RUN addgroup -S app \
    && adduser -S -g app app \
    && apk --no-cache add ca-certificates curl jq

WORKDIR /home/app

RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
    chmod +x hey && mv hey /usr/local/bin/hey

# verify hey works
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0

RUN curl -sSL "https://get.helm.sh/helm-v2.15.1-linux-amd64.tar.gz" | tar xvz && \
    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
    chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
    rm -rf linux-amd64

RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-rc.2-linux-amd64.tar.gz" | tar xvz && \
    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
    rm -rf linux-amd64

RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
    wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
    chmod +x /usr/local/bin/grpc_health_probe

RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
    mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web

ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto

RUN ls /tmp

COPY ./bin/loadtester .

RUN chown -R app:app ./

USER app

RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.9.3.tar.gz" | tar xvz && \
    helm init --client-only && helm plugin install helm-tiller-0.9.3 && helm plugin list

ENTRYPOINT ["./loadtester"]
LICENSE
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2018 Weaveworks. All rights reserved.
   Copyright 2019 Weaveworks

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -1,5 +0,0 @@
The maintainers are generally available in Slack at
https://weave-community.slack.com/messages/flagger/ (obtain an invitation
at https://slack.weave.works/).

Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)
Makefile
@@ -1,120 +0,0 @@
TAG?=latest
VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/canary pkg/metrics pkg/router pkg/notifier
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
TS=$(shell date +%Y-%m-%d_%H-%M-%S)

run:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test-istio \
	-metrics-server=https://prometheus.istio.flagger.dev

run-appmesh:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
	-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090

run-nginx:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
	-metrics-server=http://prometheus-weave.istio.weavedx.com

run-smi:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:istio -namespace=smi \
	-metrics-server=https://prometheus.istio.weavedx.com

run-gloo:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=gloo -namespace=gloo \
	-metrics-server=https://prometheus.istio.weavedx.com

run-nop:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=none -namespace=bg \
	-metrics-server=https://prometheus.istio.weavedx.com

run-linkerd:
	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=linkerd -namespace=dev \
	-metrics-server=https://prometheus.linkerd.flagger.dev

build:
	GIT_COMMIT=$$(git rev-list -1 HEAD) && GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=$${GIT_COMMIT}" -a -installsuffix cgo -o ./bin/flagger ./cmd/flagger/*
	docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile

push:
	docker tag weaveworks/flagger:$(TAG) weaveworks/flagger:$(VERSION)
	docker push weaveworks/flagger:$(VERSION)

fmt:
	gofmt -l -s -w $(SOURCE_DIRS)

test-fmt:
	gofmt -l -s $(SOURCE_DIRS) | grep ".*\.go"; if [ "$$?" = "0" ]; then exit 1; fi

test-codegen:
	./hack/verify-codegen.sh

test: test-fmt test-codegen
	go test ./...

helm-package:
	cd charts/ && helm package ./*
	mv charts/*.tgz bin/
	curl -s https://raw.githubusercontent.com/weaveworks/flagger/gh-pages/index.yaml > ./bin/index.yaml
	helm repo index bin --url https://flagger.app --merge ./bin/index.yaml

helm-up:
	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
	helm upgrade --install flagger-grafana ./charts/grafana --namespace=istio-system

version-set:
	@next="$(TAG)" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
	sed -i '' "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
	echo "Version $$next set in code, deployment, chart and kustomize"

version-up:
	@next="$(VERSION_MINOR).$$(($(PATCH) + 1))" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and chart"

dev-up: version-up
	@echo "Starting build/push/deploy pipeline for $(VERSION)"
	docker build -t quay.io/stefanprodan/flagger:$(VERSION) . -f Dockerfile
	docker push quay.io/stefanprodan/flagger:$(VERSION)
	kubectl apply -f ./artifacts/flagger/crd.yaml
	helm upgrade -i flagger ./charts/flagger --namespace=istio-system --set crd.create=false

release:
	git tag $(VERSION)
	git push origin $(VERSION)

release-set: fmt version-set helm-package
	git add .
	git commit -m "Release $(VERSION)"
	git push origin master
	git tag $(VERSION)
	git push origin $(VERSION)

reset-test:
	kubectl delete -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/canaries

loadtester-run: loadtester-build
	docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
	docker rm -f tester || true
	docker run -dp 8888:9090 --name tester weaveworks/flagger-loadtester:$(LT_VERSION)

loadtester-build:
	GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./bin/loadtester ./cmd/loadtester/*

loadtester-push:
	docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
	docker push weaveworks/flagger-loadtester:$(LT_VERSION)
README.md
@@ -1,192 +1,4 @@
# flagger
# flagger website

[build status](https://circleci.com/gh/weaveworks/flagger)
[Go Report Card](https://goreportcard.com/report/github.com/weaveworks/flagger)
[codecov](https://codecov.io/gh/weaveworks/flagger)
[license](https://github.com/weaveworks/flagger/blob/master/LICENSE)
[release](https://github.com/weaveworks/flagger/releases)
[flagger.app](https://flagger.app)

Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
load tests or any other custom validation.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.

## Documentation

Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)

* Install
  * [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
  * [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
  * [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
  * [Flagger install with SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
* How it works
  * [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
  * [Routing](https://docs.flagger.app/how-it-works#istio-routing)
  * [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
  * [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
  * [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
  * [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
  * [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
  * [Load testing](https://docs.flagger.app/how-it-works#load-testing)
  * [Manual gating](https://docs.flagger.app/how-it-works#manual-gating)
  * [FAQ](https://docs.flagger.app/faq)
* Usage
  * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
  * [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery)
  * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
  * [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
  * [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
  * [Blue/Green deployments](https://docs.flagger.app/usage/blue-green)
  * [Monitoring](https://docs.flagger.app/usage/monitoring)
  * [Alerting](https://docs.flagger.app/usage/alerting)
* Tutorials
  * [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)

## Canary CRD

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services).
These objects expose the application on the mesh and drive the canary analysis and promotion.

Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are synchronised.
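As a rough illustration (assuming the Istio provider and the `podinfo` target used below; the exact set of objects depends on the provider and Flagger version), the bootstrapped resources follow the `<target>-primary` / `<target>-canary` naming convention:

```bash
# Illustrative sketch only
kubectl -n test get deployments,services,virtualservices
# deployment.apps/podinfo              canary workload (the deployment you manage)
# deployment.apps/podinfo-primary      stable copy that serves production traffic
# service/podinfo                      routed to primary, shifted towards the canary during analysis
# service/podinfo-canary
# service/podinfo-primary
# virtualservice.networking.istio.io/podinfo   weighted HTTP routes between primary and canary
```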

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # service mesh provider (optional)
  # can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
  provider: istio
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # ClusterIP port number
    port: 9898
    # container port name or number (optional)
    targetPort: 9898
    # port name can be http or grpc (default http)
    portName: http
    # HTTP match conditions (optional)
    match:
      - uri:
          prefix: /
    # HTTP rewrite (optional)
    rewrite:
      uri: /
    # request timeout (optional)
    timeout: 5s
  # promote the canary without analysing it (default false)
  skipAnalysis: false
  # define the canary analysis timing and KPIs
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
      # builtin checks
      - name: request-success-rate
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        threshold: 99
        interval: 1m
      - name: request-duration
        # maximum req duration P99
        # milliseconds
        threshold: 500
        interval: 30s
      # custom check
      - name: "kafka lag"
        threshold: 100
        query: |
          avg_over_time(
            kafka_consumergroup_lag{
              consumergroup=~"podinfo-consumer-.*",
              topic="podinfo"
            }[1m]
          )
    # testing (optional)
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```

For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/how-it-works).
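With the analysis settings above, traffic is shifted towards the canary in 5% increments every minute up to a maximum of 50%, so a healthy canary is promoted after roughly ten iterations, while ten failed metric checks (the `threshold`) abort the analysis and roll the canary back.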

## Features

| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo | Kubernetes CNI |
| -------------------------------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |

## Roadmap

* Integrate with other ingress controllers like Contour, HAProxy, ALB
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two

## Contributing

Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.

When submitting bug reports please include as much detail as possible:

* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)

## Getting Help

If you have any questions about Flagger and progressive delivery:

* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [Weave community slack](https://slack.weave.works/)
  and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
  hands-on training and meetups in your area.
* File an [issue](https://github.com/weaveworks/flagger/issues/new).

Your feedback is always welcome!
@@ -1,70 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# container port name (optional)
|
||||
# can be http or grpc
|
||||
portName: http
|
||||
# App Mesh reference
|
||||
meshName: global
|
||||
# App Mesh retry policy (optional)
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 1s
|
||||
retryOn: "gateway-error,client-error,stream-error"
|
||||
# define the canary analysis timing and KPIs
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# App Mesh Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
@@ -1,65 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:3.1.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
value: blue
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
@@ -1,6 +0,0 @@
|
||||
apiVersion: appmesh.k8s.aws/v1beta1
|
||||
kind: Mesh
|
||||
metadata:
|
||||
name: global
|
||||
spec:
|
||||
serviceDiscoveryType: dns
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 2
|
||||
maxReplicas: 4
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
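
The comment above says the HPA adds replicas once average usage exceeds 99% of the requested CPU (100m). A minimal sketch of the standard Kubernetes HPA rule, desired = ceil(current * currentUtilization / targetUtilization); `desiredReplicas` is an illustrative helper, not the controller's actual code:

```go
package main

import (
	"fmt"
	"math"
)

// desiredReplicas is an illustrative helper applying the standard HPA scaling
// rule: replicas grow when average utilization exceeds the target.
func desiredReplicas(current int, currentUtilization, targetUtilization float64) int {
	return int(math.Ceil(float64(current) * currentUtilization / targetUtilization))
}

func main() {
	// With requests.cpu=100m and targetAverageUtilization=99, an average usage
	// of 120m per pod (120% utilization) pushes 2 replicas up to 3,
	// still within the maxReplicas: 4 bound.
	fmt.Println(desiredReplicas(2, 120, 99)) // 3
}
```
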
|
||||
@@ -1,172 +0,0 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: ingress-config
|
||||
namespace: test
|
||||
labels:
|
||||
app: ingress
|
||||
data:
|
||||
envoy.yaml: |
|
||||
static_resources:
|
||||
listeners:
|
||||
- address:
|
||||
socket_address:
|
||||
address: 0.0.0.0
|
||||
port_value: 8080
|
||||
filter_chains:
|
||||
- filters:
|
||||
- name: envoy.http_connection_manager
|
||||
config:
|
||||
access_log:
|
||||
- name: envoy.file_access_log
|
||||
config:
|
||||
path: /dev/stdout
|
||||
codec_type: auto
|
||||
stat_prefix: ingress_http
|
||||
http_filters:
|
||||
- name: envoy.router
|
||||
config: {}
|
||||
route_config:
|
||||
name: local_route
|
||||
virtual_hosts:
|
||||
- name: local_service
|
||||
domains: ["*"]
|
||||
routes:
|
||||
- match:
|
||||
prefix: "/"
|
||||
route:
|
||||
cluster: podinfo
|
||||
host_rewrite: podinfo.test
|
||||
timeout: 15s
|
||||
retry_policy:
|
||||
retry_on: "gateway-error,connect-failure,refused-stream"
|
||||
num_retries: 10
|
||||
per_try_timeout: 5s
|
||||
clusters:
|
||||
- name: podinfo
|
||||
connect_timeout: 0.30s
|
||||
type: strict_dns
|
||||
lb_policy: round_robin
|
||||
load_assignment:
|
||||
cluster_name: podinfo
|
||||
endpoints:
|
||||
- lb_endpoints:
|
||||
- endpoint:
|
||||
address:
|
||||
socket_address:
|
||||
address: podinfo.test
|
||||
port_value: 9898
|
||||
admin:
|
||||
access_log_path: /dev/null
|
||||
address:
|
||||
socket_address:
|
||||
address: 0.0.0.0
|
||||
port_value: 9999
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
labels:
|
||||
app: ingress
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ingress
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ingress
|
||||
annotations:
|
||||
prometheus.io/path: "/stats/prometheus"
|
||||
prometheus.io/port: "9999"
|
||||
prometheus.io/scrape: "true"
|
||||
# dummy port to exclude ingress from mesh traffic
|
||||
# only egress should go over the mesh
|
||||
appmesh.k8s.aws/ports: "444"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: ingress
|
||||
image: "envoyproxy/envoy-alpine:v1.11.1"
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
command:
|
||||
- /usr/local/bin/envoy
|
||||
args:
|
||||
- -l
|
||||
- $loglevel
|
||||
- -c
|
||||
- /config/envoy.yaml
|
||||
- --base-id
|
||||
- "1234"
|
||||
ports:
|
||||
- name: admin
|
||||
containerPort: 9999
|
||||
protocol: TCP
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 5
|
||||
tcpSocket:
|
||||
port: admin
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 5
|
||||
tcpSocket:
|
||||
port: admin
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /config
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: ingress-config
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
spec:
|
||||
selector:
|
||||
app: ingress
|
||||
ports:
|
||||
- protocol: TCP
|
||||
name: http
|
||||
port: 80
|
||||
targetPort: http
|
||||
type: LoadBalancer
|
||||
---
|
||||
apiVersion: appmesh.k8s.aws/v1beta1
|
||||
kind: VirtualNode
|
||||
metadata:
|
||||
name: ingress
|
||||
namespace: test
|
||||
spec:
|
||||
meshName: global
|
||||
listeners:
|
||||
- portMapping:
|
||||
port: 80
|
||||
protocol: http
|
||||
serviceDiscovery:
|
||||
dns:
|
||||
hostName: ingress.test
|
||||
backends:
|
||||
- virtualService:
|
||||
virtualServiceName: podinfo.test
|
||||
@@ -1,67 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# canary match condition
|
||||
match:
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(type=insider)(;.*)?$"
|
||||
- headers:
|
||||
user-agent:
|
||||
regex: "(?=.*Safari)(?!.*Chrome).*$"
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"
|
||||
@@ -1,88 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider (default istio)
|
||||
# can be: kubernetes, istio, appmesh, smi, nginx, gloo, supergloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: istio
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# port name can be http or grpc (default http)
|
||||
portName: http
|
||||
# add all the other container ports
|
||||
# when generating ClusterIP services (default false)
|
||||
portDiscovery: false
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
# remove the mesh gateway if the public host is
|
||||
# shared across multiple virtual services
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
# HTTP match conditions (optional)
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
# HTTP rewrite (optional)
|
||||
rewrite:
|
||||
uri: /
|
||||
# HTTP timeout (optional)
|
||||
timeout: 30s
|
||||
# promote the canary without analysing it (default false)
|
||||
skipAnalysis: false
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
logCmdOutput: "true"
|
||||
@@ -1,68 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
minReadySeconds: 5
|
||||
revisionHistoryLimit: 5
|
||||
progressDeadlineSeconds: 60
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: podinfo
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9898"
|
||||
labels:
|
||||
app: podinfo
|
||||
spec:
|
||||
containers:
|
||||
- name: podinfod
|
||||
image: stefanprodan/podinfo:3.1.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: http
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --level=info
|
||||
- --random-delay=false
|
||||
- --random-error=false
|
||||
env:
|
||||
- name: PODINFO_UI_COLOR
|
||||
value: blue
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:9898/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 64Mi
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
minReplicas: 2
|
||||
maxReplicas: 4
|
||||
metrics:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
# scale up if usage is above
|
||||
# 99% of the requested CPU (100m)
|
||||
targetAverageUtilization: 99
|
||||
@@ -1,6 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
istio-injection: enabled
|
||||
@@ -1,26 +0,0 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: backend
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: regexp:^1.7.*
|
||||
spec:
|
||||
releaseName: backend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.2.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.7.0
|
||||
httpServer:
|
||||
timeout: 30s
|
||||
canary:
|
||||
enabled: true
|
||||
istioIngress:
|
||||
enabled: false
|
||||
loadtest:
|
||||
enabled: true
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: frontend
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: semver:~1.7
|
||||
spec:
|
||||
releaseName: frontend
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: podinfo
|
||||
version: 2.2.0
|
||||
values:
|
||||
image:
|
||||
repository: quay.io/stefanprodan/podinfo
|
||||
tag: 1.7.0
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
canary:
|
||||
enabled: true
|
||||
istioIngress:
|
||||
enabled: true
|
||||
gateway: public-gateway.istio-system.svc.cluster.local
|
||||
host: frontend.istio.example.com
|
||||
loadtest:
|
||||
enabled: true
|
||||
@@ -1,18 +0,0 @@
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: loadtester
|
||||
namespace: test
|
||||
annotations:
|
||||
flux.weave.works/automated: "true"
|
||||
flux.weave.works/tag.chart-image: glob:0.*
|
||||
spec:
|
||||
releaseName: flagger-loadtester
|
||||
chart:
|
||||
repository: https://flagger.app/
|
||||
name: loadtester
|
||||
version: 0.6.0
|
||||
values:
|
||||
image:
|
||||
repository: weaveworks/flagger-loadtester
|
||||
tag: 0.6.1
|
||||
@@ -1,264 +0,0 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
labels:
|
||||
app: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
scrape_configs:
|
||||
|
||||
# Scrape config for AppMesh Envoy sidecar
|
||||
- job_name: 'appmesh-envoy'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_name]
|
||||
action: keep
|
||||
regex: '^envoy$'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: ${1}:9901
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
|
||||
# Exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
# Scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# Scrape config for nodes
|
||||
- job_name: 'kubernetes-nodes'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
|
||||
# scrape config for cAdvisor
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
|
||||
# scrape config for pods
|
||||
- job_name: kubernetes-pods
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- action: keep
|
||||
regex: true
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
|
||||
- source_labels: [ __address__ ]
|
||||
regex: '.*9901.*'
|
||||
action: drop
|
||||
- action: replace
|
||||
regex: (.+)
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_path
|
||||
target_label: __metrics_path__
|
||||
- action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
source_labels:
|
||||
- __address__
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_port
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: kubernetes_namespace
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: kubernetes_pod_name
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
app: prometheus
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus
|
||||
annotations:
|
||||
version: "appmesh-v1alpha1"
|
||||
spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.7.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=6h'
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: http
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: 9090
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: 9090
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 128Mi
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/prometheus
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: prometheus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: appmesh-system
|
||||
labels:
|
||||
name: prometheus
|
||||
spec:
|
||||
selector:
|
||||
app: prometheus
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
@@ -1,102 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: flagger
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: flagger
|
||||
labels:
|
||||
app: flagger
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
- configmaps
|
||||
- secrets
|
||||
- services
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses
|
||||
- ingresses/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- flagger.app
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- networking.istio.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
- destinationrules
|
||||
- destinationrules/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- appmesh.k8s.aws
|
||||
resources:
|
||||
- meshes
|
||||
- meshes/status
|
||||
- virtualnodes
|
||||
- virtualnodes/status
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- split.smi-spec.io
|
||||
resources:
|
||||
- trafficsplits
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gloo.solo.io
|
||||
resources:
|
||||
- settings
|
||||
- upstreams
|
||||
- upstreamgroups
|
||||
- proxies
|
||||
- virtualservices
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gateway.solo.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- gateways
|
||||
verbs: ["*"]
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: flagger
|
||||
labels:
|
||||
app: flagger
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: flagger
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: flagger
|
||||
namespace: istio-system
|
||||
@@ -1,319 +0,0 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: canaries.flagger.app
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
spec:
|
||||
group: flagger.app
|
||||
version: v1alpha3
|
||||
versions:
|
||||
- name: v1alpha3
|
||||
served: true
|
||||
storage: true
|
||||
- name: v1alpha2
|
||||
served: true
|
||||
storage: false
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: false
|
||||
names:
|
||||
plural: canaries
|
||||
singular: canary
|
||||
kind: Canary
|
||||
categories:
|
||||
- all
|
||||
scope: Namespaced
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Status
|
||||
type: string
|
||||
JSONPath: .status.phase
|
||||
- name: Weight
|
||||
type: string
|
||||
JSONPath: .status.canaryWeight
|
||||
- name: FailedChecks
|
||||
type: string
|
||||
JSONPath: .status.failedChecks
|
||||
priority: 1
|
||||
- name: Interval
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.interval
|
||||
priority: 1
|
||||
- name: Mirror
|
||||
type: boolean
|
||||
JSONPath: .spec.canaryAnalysis.mirror
|
||||
priority: 1
|
||||
- name: StepWeight
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.stepWeight
|
||||
priority: 1
|
||||
- name: MaxWeight
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.maxWeight
|
||||
priority: 1
|
||||
- name: LastTransitionTime
|
||||
type: string
|
||||
JSONPath: .status.lastTransitionTime
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
required:
|
||||
- targetRef
|
||||
- service
|
||||
- canaryAnalysis
|
||||
properties:
|
||||
provider:
|
||||
description: Traffic management provider
|
||||
type: string
|
||||
metricsServer:
|
||||
description: Prometheus URL
|
||||
type: string
|
||||
progressDeadlineSeconds:
|
||||
description: Deployment progress deadline
|
||||
type: number
|
||||
targetRef:
|
||||
description: Deployment selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
ingressRef:
|
||||
description: NGINX ingress selector
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
service:
|
||||
type: object
|
||||
required: ["port"]
|
||||
properties:
|
||||
port:
|
||||
description: Container port number
|
||||
type: number
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: number
|
||||
portDiscovery:
|
||||
description: Enable port discovery
|
||||
type: boolean
|
||||
meshName:
|
||||
description: AppMesh mesh name
|
||||
type: string
|
||||
backends:
|
||||
description: AppMesh backend array
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
timeout:
|
||||
description: Istio HTTP or gRPC request timeout
|
||||
type: string
|
||||
trafficPolicy:
|
||||
description: Istio traffic policy
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
match:
|
||||
description: Istio URL match conditions
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
rewrite:
|
||||
description: Istio URL rewrite
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
headers:
|
||||
description: Istio headers operations
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
corsPolicy:
|
||||
description: Istio CORS policy
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
gateways:
|
||||
description: Istio gateways list
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
hosts:
|
||||
description: Istio hosts list
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
skipAnalysis:
|
||||
type: boolean
|
||||
canaryAnalysis:
|
||||
properties:
|
||||
interval:
|
||||
description: Canary schedule interval
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
iterations:
|
||||
description: Number of checks to run for A/B Testing and Blue/Green
|
||||
type: number
|
||||
threshold:
|
||||
description: Max number of failed checks before rollback
|
||||
type: number
|
||||
maxWeight:
|
||||
description: Max traffic percentage routed to canary
|
||||
type: number
|
||||
stepWeight:
|
||||
description: Canary incremental traffic percentage step
|
||||
type: number
|
||||
mirror:
|
||||
description: Mirror traffic to canary before shifting
|
||||
type: boolean
|
||||
match:
|
||||
description: A/B testing match conditions
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
metrics:
|
||||
description: Prometheus query list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ["name", "threshold"]
|
||||
properties:
|
||||
name:
|
||||
description: Name of the Prometheus metric
|
||||
type: string
|
||||
interval:
|
||||
description: Interval of the promql query
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
threshold:
|
||||
description: Max scalar value accepted for this metric
|
||||
type: number
|
||||
query:
|
||||
description: Prometheus query
|
||||
type: string
|
||||
webhooks:
|
||||
description: Webhook list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ["name", "url"]
|
||||
properties:
|
||||
name:
|
||||
description: Name of the webhook
|
||||
type: string
|
||||
type:
|
||||
description: Type of the webhook, executed pre, post or during rollout
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- confirm-rollout
|
||||
- pre-rollout
|
||||
- rollout
|
||||
- confirm-promotion
|
||||
- post-rollout
|
||||
url:
|
||||
description: URL address of this webhook
|
||||
type: string
|
||||
format: url
|
||||
timeout:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
metadata:
|
||||
description: Metadata (key-value pairs) for this webhook
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
status:
|
||||
properties:
|
||||
phase:
|
||||
description: Analysis phase of this canary
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- Initializing
|
||||
- Initialized
|
||||
- Waiting
|
||||
- Progressing
|
||||
- Promoting
|
||||
- Finalising
|
||||
- Succeeded
|
||||
- Failed
|
||||
canaryWeight:
|
||||
description: Traffic weight percentage routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ["type", "status", "reason"]
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: Message associated with this condition
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the current status of this condition
|
||||
type: string
|
||||
status:
|
||||
description: Status of this condition
|
||||
type: string
|
||||
type:
|
||||
description: Type of this condition
|
||||
type: string
|
||||
@@ -1,65 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: flagger
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: flagger
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: flagger
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
serviceAccountName: flagger
|
||||
containers:
|
||||
- name: flagger
|
||||
image: weaveworks/flagger:0.20.2
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./flagger
|
||||
- -log-level=info
|
||||
- -control-loop-interval=10s
|
||||
- -mesh-provider=$(MESH_PROVIDER)
|
||||
- -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=2
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=2
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 10001
|
||||
@@ -1,27 +0,0 @@
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: public-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
tls:
|
||||
httpsRedirect: true
|
||||
- port:
|
||||
number: 443
|
||||
name: https
|
||||
protocol: HTTPS
|
||||
hosts:
|
||||
- "*"
|
||||
tls:
|
||||
mode: SIMPLE
|
||||
privateKey: /etc/istio/ingressgateway-certs/tls.key
|
||||
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
|
||||
@@ -1,834 +0,0 @@
|
||||
# Source: istio/charts/prometheus/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
scrape_configs:
|
||||
|
||||
- job_name: 'istio-mesh'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;prometheus
|
||||
|
||||
|
||||
# Scrape config for envoy stats
|
||||
- job_name: 'envoy-stats'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_port_name]
|
||||
action: keep
|
||||
regex: '.*-envoy-prom'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:15090
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
metric_relabel_configs:
|
||||
# Exclude some of the envoy metrics that have massive cardinality
|
||||
# This list may need to be pruned further moving forward, as informed
|
||||
# by performance and scalability testing.
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
|
||||
- job_name: 'istio-policy'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-policy;http-monitoring
|
||||
|
||||
- job_name: 'istio-telemetry'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;http-monitoring
|
||||
|
||||
- job_name: 'pilot'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-pilot;http-monitoring
|
||||
|
||||
- job_name: 'galley'
|
||||
# Override the global default and scrape targets from this job every 5 seconds.
|
||||
scrape_interval: 5s
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-galley;http-monitoring
|
||||
|
||||
# scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# scrape config for nodes (kubelet)
|
||||
- job_name: 'kubernetes-nodes'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
|
||||
# Scrape config for Kubelet cAdvisor.
|
||||
#
|
||||
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
|
||||
# (those whose names begin with 'container_') have been removed from the
|
||||
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
|
||||
# retrieve those metrics.
|
||||
#
|
||||
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
|
||||
# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
|
||||
# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
|
||||
# the --cadvisor-port=0 Kubelet flag).
|
||||
#
|
||||
# This job is not necessary and should be removed in Kubernetes 1.6 and
|
||||
# earlier versions, or it will cause the metrics to be scraped twice.
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
|
||||
# scrape config for service endpoints.
|
||||
- job_name: 'kubernetes-service-endpoints'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
|
||||
action: replace
|
||||
target_label: __scheme__
|
||||
regex: (https?)
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
target_label: __address__
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_service_name]
|
||||
action: replace
|
||||
target_label: kubernetes_name
|
||||
|
||||
- job_name: 'kubernetes-pods'
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs: # If first two labels are present, pod should be scraped by the istio-secure job.
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status]
|
||||
action: drop
|
||||
regex: (.+)
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_istio_mtls]
|
||||
action: drop
|
||||
regex: (true)
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
- job_name: 'kubernetes-pods-istio-secure'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /etc/istio-certs/root-cert.pem
|
||||
cert_file: /etc/istio-certs/cert-chain.pem
|
||||
key_file: /etc/istio-certs/key.pem
|
||||
insecure_skip_verify: true # prometheus does not support secure naming.
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
# sidecar status annotation is added by sidecar injector and
|
||||
# istio_workload_mtls_ability can be specifically placed on a pod to indicate its ability to receive mtls traffic.
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_sidecar_istio_io_status, __meta_kubernetes_pod_annotation_istio_mtls]
|
||||
action: keep
|
||||
regex: (([^;]+);([^;]*))|(([^;]*);(true))
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__] # Only keep address that is host:port
|
||||
action: keep # otherwise an extra target with ':443' is added for https scheme
|
||||
regex: ([^:]+):(\d+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus-istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/clusterrolebindings.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus-istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus-istio-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/service.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
labels:
|
||||
name: prometheus
|
||||
spec:
|
||||
selector:
|
||||
app: prometheus
|
||||
ports:
|
||||
- name: http-prometheus
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
|
||||
---
|
||||
|
||||
# Source: istio/charts/prometheus/templates/deployment.yaml
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: istio-system
|
||||
labels:
|
||||
app: prometheus
|
||||
chart: prometheus-1.0.6
|
||||
heritage: Tiller
|
||||
release: istio
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: prometheus
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: prometheus
|
||||
annotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
spec:
|
||||
serviceAccountName: prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.3.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=6h'
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: http
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: 9090
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: 9090
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/prometheus
|
||||
- mountPath: /etc/istio-certs
|
||||
name: istio-certs
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: prometheus
|
||||
- name: istio-certs
|
||||
secret:
|
||||
defaultMode: 420
|
||||
optional: true
|
||||
secretName: istio.default
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- ppc64le
|
||||
- s390x
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- amd64
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- ppc64le
|
||||
- weight: 2
|
||||
preference:
|
||||
matchExpressions:
|
||||
- key: beta.kubernetes.io/arch
|
||||
operator: In
|
||||
values:
|
||||
- s390x
|
||||
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestcount
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: "1"
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestduration
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: response.duration | "0ms"
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: requestsize
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: request.size | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: responsesize
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: response.size | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.host | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
request_protocol: api.protocol | context.protocol | "unknown"
|
||||
response_code: response.code | 200
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: tcpbytesent
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: connection.sent.bytes | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.name | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: metric
|
||||
metadata:
|
||||
name: tcpbytereceived
|
||||
namespace: istio-system
|
||||
spec:
|
||||
value: connection.received.bytes | 0
|
||||
dimensions:
|
||||
reporter: conditional((context.reporter.kind | "inbound") == "outbound", "source", "destination")
|
||||
source_workload: source.workload.name | "unknown"
|
||||
source_workload_namespace: source.workload.namespace | "unknown"
|
||||
source_principal: source.principal | "unknown"
|
||||
source_app: source.labels["app"] | "unknown"
|
||||
source_version: source.labels["version"] | "unknown"
|
||||
destination_workload: destination.workload.name | "unknown"
|
||||
destination_workload_namespace: destination.workload.namespace | "unknown"
|
||||
destination_principal: destination.principal | "unknown"
|
||||
destination_app: destination.labels["app"] | "unknown"
|
||||
destination_version: destination.labels["version"] | "unknown"
|
||||
destination_service: destination.service.name | "unknown"
|
||||
destination_service_name: destination.service.name | "unknown"
|
||||
destination_service_namespace: destination.service.namespace | "unknown"
|
||||
connection_security_policy: conditional((context.reporter.kind | "inbound") == "outbound", "unknown", conditional(connection.mtls | false, "mutual_tls", "none"))
|
||||
monitored_resource_type: '"UNSPECIFIED"'
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: prometheus
|
||||
metadata:
|
||||
name: handler
|
||||
namespace: istio-system
|
||||
spec:
|
||||
metrics:
|
||||
- name: requests_total
|
||||
instance_name: requestcount.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
- name: request_duration_seconds
|
||||
instance_name: requestduration.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
explicit_buckets:
|
||||
bounds: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
|
||||
- name: request_bytes
|
||||
instance_name: requestsize.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
exponentialBuckets:
|
||||
numFiniteBuckets: 8
|
||||
scale: 1
|
||||
growthFactor: 10
|
||||
- name: response_bytes
|
||||
instance_name: responsesize.metric.istio-system
|
||||
kind: DISTRIBUTION
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- request_protocol
|
||||
- response_code
|
||||
- connection_security_policy
|
||||
buckets:
|
||||
exponentialBuckets:
|
||||
numFiniteBuckets: 8
|
||||
scale: 1
|
||||
growthFactor: 10
|
||||
- name: tcp_sent_bytes_total
|
||||
instance_name: tcpbytesent.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- connection_security_policy
|
||||
- name: tcp_received_bytes_total
|
||||
instance_name: tcpbytereceived.metric.istio-system
|
||||
kind: COUNTER
|
||||
label_names:
|
||||
- reporter
|
||||
- source_app
|
||||
- source_principal
|
||||
- source_workload
|
||||
- source_workload_namespace
|
||||
- source_version
|
||||
- destination_app
|
||||
- destination_principal
|
||||
- destination_workload
|
||||
- destination_workload_namespace
|
||||
- destination_version
|
||||
- destination_service
|
||||
- destination_service_name
|
||||
- destination_service_namespace
|
||||
- connection_security_policy
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: rule
|
||||
metadata:
|
||||
name: promhttp
|
||||
namespace: istio-system
|
||||
spec:
|
||||
match: context.protocol == "http" || context.protocol == "grpc"
|
||||
actions:
|
||||
- handler: handler.prometheus
|
||||
instances:
|
||||
- requestcount.metric
|
||||
- requestduration.metric
|
||||
- requestsize.metric
|
||||
- responsesize.metric
|
||||
---
|
||||
apiVersion: "config.istio.io/v1alpha2"
|
||||
kind: rule
|
||||
metadata:
|
||||
name: promtcp
|
||||
namespace: istio-system
|
||||
spec:
|
||||
match: context.protocol == "tcp"
|
||||
actions:
|
||||
- handler: handler.prometheus
|
||||
instances:
|
||||
- tcpbytesent.metric
|
||||
- tcpbytereceived.metric
|
||||
---
|
||||
@@ -1,52 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: gloo
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
progressDeadlineSeconds: 60
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
port: 9898
|
||||
canaryAnalysis:
|
||||
interval: 10s
|
||||
threshold: 10
|
||||
maxWeight: 50
|
||||
stepWeight: 5
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 10s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: gloo-acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 10s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
|
||||
logCmdOutput: "true"
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: gateway.solo.io/v1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
virtualHost:
|
||||
domains:
|
||||
- '*'
|
||||
name: podinfo
|
||||
routes:
|
||||
- matcher:
|
||||
prefix: /
|
||||
routeAction:
|
||||
upstreamGroup:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
@@ -1,58 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger-helmtester
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: flagger-helmtester
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
serviceAccountName: tiller
|
||||
containers:
|
||||
- name: helmtester
|
||||
image: weaveworks/flagger-loadtester:0.8.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./loadtester
|
||||
- -port=8080
|
||||
- -log-level=info
|
||||
- -timeout=1h
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
@@ -1,16 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: flagger-helmtester
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: flagger-helmtester
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: flagger-helmtester
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -1,19 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: flagger-loadtester-bats
|
||||
data:
|
||||
tests: |
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "check message" {
|
||||
curl -sS http://${URL} | jq -r .message | {
|
||||
run cut -d $' ' -f1
|
||||
[ $output = "greetings" ]
|
||||
}
|
||||
}
|
||||
|
||||
@test "check headers" {
|
||||
curl -sS http://${URL}/headers | grep X-Request-Id
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: flagger-loadtester
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: loadtester
|
||||
image: weaveworks/flagger-loadtester:0.11.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./loadtester
|
||||
- -port=8080
|
||||
- -log-level=info
|
||||
- -timeout=1h
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 10001
|
||||
# volumeMounts:
|
||||
# - name: tests
|
||||
# mountPath: /bats
|
||||
# readOnly: true
|
||||
# volumes:
|
||||
# - name: tests
|
||||
# configMap:
|
||||
# name: flagger-loadtester-bats
|
||||
@@ -1,15 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
labels:
|
||||
app: flagger-loadtester
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
app: flagger-loadtester
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
istio-injection: enabled
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: enabled
|
||||
@@ -1,70 +0,0 @@
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# ingress reference
|
||||
ingressRef:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# ClusterIP port number
|
||||
port: 80
|
||||
# container port number or name
|
||||
targetPort: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# NGINX Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: "latency"
|
||||
threshold: 0.5
|
||||
interval: 1m
|
||||
query: |
|
||||
histogram_quantile(0.99,
|
||||
sum(
|
||||
rate(
|
||||
http_request_duration_seconds_bucket{
|
||||
kubernetes_namespace="test",
|
||||
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
|
||||
}[1m]
|
||||
)
|
||||
) by (le)
|
||||
)
|
||||
# external checks (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
|
||||
logCmdOutput: "true"
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
labels:
|
||||
app: podinfo
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
spec:
|
||||
rules:
|
||||
- host: app.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: podinfo
|
||||
servicePort: 9898
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,21 +0,0 @@
|
||||
apiVersion: v1
|
||||
name: flagger
|
||||
version: 0.20.2
|
||||
appVersion: 0.20.2
|
||||
kubeVersion: ">=1.11.0-0"
|
||||
engine: gotpl
|
||||
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
|
||||
home: https://docs.flagger.app
|
||||
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
|
||||
sources:
|
||||
- https://github.com/weaveworks/flagger
|
||||
maintainers:
|
||||
- name: stefanprodan
|
||||
url: https://github.com/stefanprodan
|
||||
email: stefanprodan@users.noreply.github.com
|
||||
keywords:
|
||||
- canary
|
||||
- istio
|
||||
- appmesh
|
||||
- linkerd
|
||||
- gitops
|
||||
@@ -1,110 +0,0 @@
|
||||
# Flagger
|
||||
|
||||
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like the HTTP request success rate, average request duration and pod health.
Based on the KPI analysis, a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
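
For illustration, a minimal Canary resource driving this loop might look like the sketch below (field names follow the `flagger.app/v1alpha3` examples elsewhere in this repository; the `podinfo` target and `test` namespace are placeholders):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment that Flagger clones and progressively promotes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
  canaryAnalysis:
    interval: 1m        # how often the KPIs are checked
    threshold: 10       # failed checks before rollback
    maxWeight: 50       # max traffic percentage routed to the canary
    stepWeight: 5       # traffic increment per interval
    metrics:
      - name: request-success-rate
        threshold: 99   # min percentage of non-5xx responses
        interval: 1m
      - name: request-duration
        threshold: 500  # max P99 latency in milliseconds
        interval: 1m
```

Flagger scales up the canary workload, shifts traffic in `stepWeight` increments up to `maxWeight`, and rolls back once `threshold` metric checks have failed.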
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Kubernetes >= 1.11
|
||||
* Prometheus >= 2.6
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```console
|
||||
$ helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's custom resource definitions:
|
||||
|
||||
```console
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
To install the chart with the release name `flagger` for Istio:
|
||||
|
||||
```console
|
||||
$ helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://prometheus:9090
|
||||
```
|
||||
|
||||
To install the chart with the release name `flagger` for Linkerd:
|
||||
|
||||
```console
|
||||
$ helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=linkerd \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=linkerd \
|
||||
--set metricsServer=http://linkerd-prometheus:9090
|
||||
```
|
||||
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `flagger` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete --purge flagger
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the Flagger chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | image repository | `weaveworks/flagger`
|
||||
`image.tag` | image tag | `<VERSION>`
|
||||
`image.pullPolicy` | image pull policy | `IfNotPresent`
|
||||
`prometheus.install` | if `true`, installs Prometheus configured to scrape all pods in the cluster including the App Mesh sidecar | `false`
|
||||
`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
|
||||
`selectorLabels` | list of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
|
||||
`slack.url` | Slack incoming webhook | None
|
||||
`slack.channel` | Slack channel | None
|
||||
`slack.user` | Slack username | `flagger`
|
||||
`msteams.url` | Microsoft Teams incoming webhook | None
|
||||
`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
|
||||
`leaderElection.replicaCount` | number of replicas | `1`
|
||||
`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
|
||||
`rbac.create` | if `true`, create and use RBAC resources | `true`
|
||||
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
|
||||
`crd.create` | if `true`, create Flagger's CRDs | `true`
|
||||
`resources.requests/cpu` | pod CPU request | `10m`
|
||||
`resources.requests/memory` | pod memory request | `32Mi`
|
||||
`resources.limits/cpu` | pod CPU limit | `1000m`
|
||||
`resources.limits/memory` | pod memory limit | `512Mi`
|
||||
`affinity` | node/pod affinities | None
|
||||
`nodeSelector` | node labels for pod assignment | `{}`
|
||||
`tolerations` | list of node taints to tolerate | `[]`
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
|
||||
|
||||
```console
|
||||
$ helm upgrade -i flagger flagger/flagger \
|
||||
--namespace istio-system \
|
||||
--set crd.create=false \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```console
|
||||
$ helm upgrade -i flagger flagger/flagger \
|
||||
--namespace istio-system \
|
||||
-f values.yaml
|
||||
```
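
For instance, a minimal `values.yaml` fragment (a sketch; the keys come from the configuration table above, and the webhook URL is a placeholder) could contain:

```yaml
# values.yaml - example fragment; replace the Slack webhook with your own
metricsServer: "http://prometheus.istio-system:9090"
slack:
  url: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
  channel: general
  user: flagger
crd:
  create: false
```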
|
||||
|
||||
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
Flagger installed
|
||||
@@ -1,42 +0,0 @@
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "flagger.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "flagger.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "flagger.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "flagger.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,11 +0,0 @@
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
@@ -1,321 +0,0 @@
|
||||
{{- if .Values.crd.create }}
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: canaries.flagger.app
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
spec:
|
||||
group: flagger.app
|
||||
version: v1alpha3
|
||||
versions:
|
||||
- name: v1alpha3
|
||||
served: true
|
||||
storage: true
|
||||
- name: v1alpha2
|
||||
served: true
|
||||
storage: false
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: false
|
||||
names:
|
||||
plural: canaries
|
||||
singular: canary
|
||||
kind: Canary
|
||||
categories:
|
||||
- all
|
||||
scope: Namespaced
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Status
|
||||
type: string
|
||||
JSONPath: .status.phase
|
||||
- name: Weight
|
||||
type: string
|
||||
JSONPath: .status.canaryWeight
|
||||
- name: FailedChecks
|
||||
type: string
|
||||
JSONPath: .status.failedChecks
|
||||
priority: 1
|
||||
- name: Interval
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.interval
|
||||
priority: 1
|
||||
- name: Mirror
|
||||
type: boolean
|
||||
JSONPath: .spec.canaryAnalysis.mirror
|
||||
priority: 1
|
||||
- name: StepWeight
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.stepWeight
|
||||
priority: 1
|
||||
- name: MaxWeight
|
||||
type: string
|
||||
JSONPath: .spec.canaryAnalysis.maxWeight
|
||||
priority: 1
|
||||
- name: LastTransitionTime
|
||||
type: string
|
||||
JSONPath: .status.lastTransitionTime
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
spec:
|
||||
required:
|
||||
- targetRef
|
||||
- service
|
||||
- canaryAnalysis
|
||||
properties:
|
||||
provider:
|
||||
description: Traffic management provider
|
||||
type: string
|
||||
metricsServer:
|
||||
description: Prometheus URL
|
||||
type: string
|
||||
progressDeadlineSeconds:
|
||||
description: Deployment progress deadline
|
||||
type: number
|
||||
targetRef:
|
||||
description: Deployment selector
|
||||
type: object
|
||||
required: ['apiVersion', 'kind', 'name']
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
required: ['apiVersion', 'kind', 'name']
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
ingressRef:
|
||||
description: NGINX ingress selector
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
required: ['apiVersion', 'kind', 'name']
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
service:
|
||||
type: object
|
||||
required: ['port']
|
||||
properties:
|
||||
port:
|
||||
description: Container port number
|
||||
type: number
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: number
|
||||
portDiscovery:
|
||||
description: Enable port discovery
|
||||
type: boolean
|
||||
meshName:
|
||||
description: AppMesh mesh name
|
||||
type: string
|
||||
backends:
|
||||
description: AppMesh backend array
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
timeout:
|
||||
description: Istio HTTP or gRPC request timeout
|
||||
type: string
|
||||
trafficPolicy:
|
||||
description: Istio traffic policy
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
match:
|
||||
description: Istio URL match conditions
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
rewrite:
|
||||
description: Istio URL rewrite
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
headers:
|
||||
description: Istio headers operations
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
corsPolicy:
|
||||
description: Istio CORS policy
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
gateways:
|
||||
description: Istio gateways list
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
hosts:
|
||||
description: Istio hosts list
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
skipAnalysis:
|
||||
type: boolean
|
||||
canaryAnalysis:
|
||||
properties:
|
||||
interval:
|
||||
description: Canary schedule interval
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
iterations:
|
||||
description: Number of checks to run for A/B Testing and Blue/Green
|
||||
type: number
|
||||
threshold:
|
||||
description: Max number of failed checks before rollback
|
||||
type: number
|
||||
maxWeight:
|
||||
description: Max traffic percentage routed to canary
|
||||
type: number
|
||||
stepWeight:
|
||||
description: Canary incremental traffic percentage step
|
||||
type: number
|
||||
mirror:
|
||||
description: Mirror traffic to canary before shifting
|
||||
type: boolean
|
||||
match:
|
||||
description: A/B testing match conditions
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: array
|
||||
metrics:
|
||||
description: Prometheus query list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ['name', 'threshold']
|
||||
properties:
|
||||
name:
|
||||
description: Name of the Prometheus metric
|
||||
type: string
|
||||
interval:
|
||||
description: Interval of the promql query
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
threshold:
|
||||
description: Max scalar value accepted for this metric
|
||||
type: number
|
||||
query:
|
||||
description: Prometheus query
|
||||
type: string
|
||||
webhooks:
|
||||
description: Webhook list for this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ["name", "url"]
|
||||
properties:
|
||||
name:
|
||||
description: Name of the webhook
|
||||
type: string
|
||||
type:
|
||||
description: Type of the webhook (pre, post or during rollout)
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- confirm-rollout
|
||||
- pre-rollout
|
||||
- rollout
|
||||
- confirm-promotion
|
||||
- post-rollout
|
||||
url:
|
||||
description: URL address of this webhook
|
||||
type: string
|
||||
format: url
|
||||
timeout:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
metadata:
|
||||
description: Metadata (key-value pairs) for this webhook
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: object
|
||||
status:
|
||||
properties:
|
||||
phase:
|
||||
description: Analysis phase of this canary
|
||||
type: string
|
||||
enum:
|
||||
- ""
|
||||
- Initializing
|
||||
- Initialized
|
||||
- Waiting
|
||||
- Progressing
|
||||
- Promoting
|
||||
- Finalising
|
||||
- Succeeded
|
||||
- Failed
|
||||
canaryWeight:
|
||||
description: Traffic weight percentage routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
properties:
|
||||
items:
|
||||
type: object
|
||||
required: ['type', 'status', 'reason']
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: LastUpdateTime of this condition
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: Message associated with this condition
|
||||
type: string
|
||||
reason:
|
||||
description: Reason for the current status of this condition
|
||||
type: string
|
||||
status:
|
||||
description: Status of this condition
|
||||
type: string
|
||||
type:
|
||||
description: Type of this condition
|
||||
type: string
|
||||
{{- end }}
|
||||
@@ -1,118 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "flagger.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.leaderElection.replicaCount }}
|
||||
strategy:
|
||||
type: Recreate
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
topologyKey: kubernetes.io/hostname
|
||||
{{- if .Values.image.pullSecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: flagger
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 10001
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./flagger
|
||||
- -log-level=info
|
||||
{{- if .Values.meshProvider }}
|
||||
- -mesh-provider={{ .Values.meshProvider }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.install }}
|
||||
- -metrics-server=http://{{ template "flagger.fullname" . }}-prometheus:9090
|
||||
{{- else }}
|
||||
- -metrics-server={{ .Values.metricsServer }}
|
||||
{{- end }}
|
||||
{{- if .Values.selectorLabels }}
|
||||
- -selector-labels={{ .Values.selectorLabels }}
|
||||
{{- end }}
|
||||
{{- if .Values.namespace }}
|
||||
- -namespace={{ .Values.namespace }}
|
||||
{{- end }}
|
||||
{{- if .Values.slack.url }}
|
||||
- -slack-url={{ .Values.slack.url }}
|
||||
- -slack-user={{ .Values.slack.user }}
|
||||
- -slack-channel={{ .Values.slack.channel }}
|
||||
{{- end }}
|
||||
{{- if .Values.msteams.url }}
|
||||
- -msteams-url={{ .Values.msteams.url }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- -enable-leader-election=true
|
||||
- -leader-election-namespace={{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingressAnnotationsPrefix }}
|
||||
- -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
{{- if .Values.env }}
|
||||
env:
|
||||
{{ toYaml .Values.env | indent 12 }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,284 +0,0 @@
|
||||
{{- if .Values.prometheus.install }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
scrape_configs:
|
||||
|
||||
# Scrape config for AppMesh Envoy sidecar
|
||||
- job_name: 'appmesh-envoy'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_name]
|
||||
action: keep
|
||||
regex: '^envoy$'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: ${1}:9901
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
|
||||
# Exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
# Scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
insecure_skip_verify: true
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# scrape config for cAdvisor
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
insecure_skip_verify: true
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
# exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__name__]
|
||||
regex: (container|machine)_(cpu|memory|network|fs)_(.+)
|
||||
action: keep
|
||||
- source_labels: [__name__]
|
||||
regex: container_memory_failures_total
|
||||
action: drop
|
||||
|
||||
# scrape config for pods
|
||||
- job_name: kubernetes-pods
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- action: keep
|
||||
regex: true
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
|
||||
- source_labels: [ __address__ ]
|
||||
regex: '.*9901.*'
|
||||
action: drop
|
||||
- action: replace
|
||||
regex: (.+)
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_path
|
||||
target_label: __metrics_path__
|
||||
- action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
source_labels:
|
||||
- __address__
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_port
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: kubernetes_namespace
|
||||
- action: replace
|
||||
source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: kubernetes_pod_name
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
|
||||
sidecar.istio.io/inject: "false"
|
||||
spec:
|
||||
serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: "docker.io/prom/prometheus:v2.12.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=2h'
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
name: http
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /-/healthy
|
||||
port: 9090
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /-/ready
|
||||
port: 9090
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 128Mi
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/prometheus
|
||||
- name: data-volume
|
||||
mountPath: /prometheus/data
|
||||
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
- name: data-volume
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 9090
|
||||
{{- end }}
|
||||
@@ -1,66 +0,0 @@
|
||||
{{- if .Values.rbac.pspEnabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
|
||||
spec:
|
||||
privileged: false
|
||||
hostIPC: false
|
||||
hostNetwork: false
|
||||
hostPID: false
|
||||
readOnlyRootFilesystem: false
|
||||
allowPrivilegeEscalation: false
|
||||
allowedCapabilities:
|
||||
- '*'
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- '*'
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames:
|
||||
- {{ template "flagger.fullname" . }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -1,102 +0,0 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
- configmaps
|
||||
- secrets
|
||||
- services
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses
|
||||
- ingresses/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- flagger.app
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- networking.istio.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
- destinationrules
|
||||
- destinationrules/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- appmesh.k8s.aws
|
||||
resources:
|
||||
- meshes
|
||||
- meshes/status
|
||||
- virtualnodes
|
||||
- virtualnodes/status
|
||||
- virtualservices
|
||||
- virtualservices/status
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- split.smi-spec.io
|
||||
resources:
|
||||
- trafficsplits
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gloo.solo.io
|
||||
resources:
|
||||
- settings
|
||||
- upstreams
|
||||
- upstreamgroups
|
||||
- proxies
|
||||
- virtualservices
|
||||
verbs: ["*"]
|
||||
- apiGroups:
|
||||
- gateway.solo.io
|
||||
resources:
|
||||
- virtualservices
|
||||
- gateways
|
||||
verbs: ["*"]
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}
|
||||
subjects:
|
||||
- name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
kind: ServiceAccount
|
||||
{{- end }}
|
||||
@@ -1,85 +0,0 @@
|
||||
# Default values for flagger.
|
||||
|
||||
image:
|
||||
repository: weaveworks/flagger
|
||||
tag: 0.20.2
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret:
|
||||
|
||||
podAnnotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
|
||||
metricsServer: "http://prometheus:9090"
|
||||
|
||||
# accepted values are kubernetes, istio, linkerd, appmesh, nginx, gloo or supergloo:mesh.namespace (defaults to istio)
|
||||
meshProvider: ""
|
||||
|
||||
# single namespace restriction
|
||||
namespace: ""
|
||||
|
||||
# list of pod labels that Flagger uses to create pod selectors
|
||||
# defaults to: app,name,app.kubernetes.io/name
|
||||
selectorLabels: ""
|
||||
|
||||
slack:
|
||||
user: flagger
|
||||
channel:
|
||||
# incoming webhook https://api.slack.com/incoming-webhooks
|
||||
url:
|
||||
|
||||
msteams:
|
||||
# MS Teams incoming webhook URL
|
||||
url:
|
||||
|
||||
#env:
|
||||
#- name: SLACK_URL
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: slack
|
||||
# key: url
|
||||
#- name: MSTEAMS_URL
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: msteams
|
||||
# key: url
|
||||
env: []
|
||||
|
||||
leaderElection:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
|
||||
serviceAccount:
|
||||
# serviceAccount.create: Whether to create a service account or not
|
||||
create: true
|
||||
# serviceAccount.name: The name of the service account to create or use
|
||||
name: ""
|
||||
|
||||
rbac:
|
||||
# rbac.create: `true` if rbac resources should be created
|
||||
create: true
|
||||
# rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
|
||||
pspEnabled: false
|
||||
|
||||
crd:
|
||||
# crd.create: `true` if custom resource definitions should be created
|
||||
create: true
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1000m"
|
||||
requests:
|
||||
memory: "32Mi"
|
||||
cpu: "10m"
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
prometheus:
|
||||
# to be used with AppMesh or nginx ingress
|
||||
install: false
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,13 +0,0 @@
|
||||
apiVersion: v1
|
||||
name: grafana
|
||||
version: 1.3.0
|
||||
appVersion: 6.2.5
|
||||
description: Grafana dashboards for monitoring Flagger canary deployments
|
||||
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
|
||||
home: https://flagger.app
|
||||
sources:
|
||||
- https://github.com/weaveworks/flagger
|
||||
maintainers:
|
||||
- name: stefanprodan
|
||||
url: https://github.com/stefanprodan
|
||||
email: stefanprodan@users.noreply.github.com
|
||||
@@ -1,79 +0,0 @@
# Flagger Grafana

Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-grafana`:

```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```

The command deploys Grafana on the Kubernetes cluster in the `istio-system` namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-grafana` deployment:

```console
helm delete --purge flagger-grafana
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Grafana chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`resources` | Pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `{}`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`url` | Prometheus URL, used when the Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `admin`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/grafana --name flagger-grafana \
--set token=WEAVE-CLOUD-TOKEN
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/grafana --name flagger-grafana -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
File diff suppressed because it is too large (two files)
@@ -1,7 +0,0 @@
|
||||
1. Run the port forward command:
|
||||
|
||||
kubectl -n {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }} 3000:80
|
||||
|
||||
2. Navigate to:
|
||||
|
||||
http://localhost:3000
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "grafana.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "grafana.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "grafana.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,6 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-dashboards
|
||||
data:
|
||||
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
|
||||
@@ -1,32 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-datasources
|
||||
data:
|
||||
datasources.yaml: |-
|
||||
apiVersion: 1
|
||||
|
||||
deleteDatasources:
|
||||
- name: prometheus
|
||||
{{- if .Values.token }}
|
||||
datasources:
|
||||
- name: prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: https://cloud.weave.works/api/prom
|
||||
isDefault: true
|
||||
editable: true
|
||||
version: 1
|
||||
basicAuth: true
|
||||
basicAuthUser: weave
|
||||
basicAuthPassword: {{ .Values.token }}
|
||||
{{- else }}
|
||||
datasources:
|
||||
- name: prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: {{ .Values.url }}
|
||||
isDefault: true
|
||||
editable: true
|
||||
version: 1
|
||||
{{- end }}
|
||||
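Rendered with the chart defaults (no Weave Cloud token), the template above provisions a Prometheus datasource pointing at `.Values.url`. This is an illustrative rendering, not a generated artifact:

```yaml
# datasources.yaml as provisioned into Grafana with the default values (sketch)
apiVersion: 1

deleteDatasources:
  - name: prometheus
datasources:
  - name: prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: true
    version: 1
```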
@@ -1,93 +0,0 @@
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "grafana.fullname" . }}
|
||||
chart: {{ template "grafana.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "grafana.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "grafana.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'false'
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
# readinessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
env:
|
||||
- name: GF_PATHS_PROVISIONING
|
||||
value: /etc/grafana/provisioning/
|
||||
{{- if .Values.password }}
|
||||
- name: GF_SECURITY_ADMIN_USER
|
||||
value: {{ .Values.user }}
|
||||
- name: GF_SECURITY_ADMIN_PASSWORD
|
||||
value: {{ .Values.password }}
|
||||
{{- else }}
|
||||
- name: GF_AUTH_BASIC_ENABLED
|
||||
value: "false"
|
||||
- name: GF_AUTH_ANONYMOUS_ENABLED
|
||||
value: "true"
|
||||
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
|
||||
value: Admin
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: grafana
|
||||
mountPath: /var/lib/grafana
|
||||
- name: dashboards
|
||||
mountPath: /etc/grafana/dashboards
|
||||
- name: datasources
|
||||
mountPath: /etc/grafana/provisioning/datasources
|
||||
- name: providers
|
||||
mountPath: /etc/grafana/provisioning/dashboards
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: grafana
|
||||
emptyDir: {}
|
||||
- name: dashboards
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-dashboards
|
||||
- name: providers
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-providers
|
||||
- name: datasources
|
||||
configMap:
|
||||
name: {{ template "grafana.fullname" . }}-datasources
|
||||
@@ -1,17 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}-providers
|
||||
data:
|
||||
providers.yaml: |+
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'default'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
disableDeletion: false
|
||||
editable: true
|
||||
options:
|
||||
path: /etc/grafana/dashboards
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "grafana.name" . }}
|
||||
chart: {{ template "grafana.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "grafana.fullname" . }}
|
||||
release: {{ .Release.Name }}
|
||||
@@ -1,39 +0,0 @@
|
||||
# Default values for grafana.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: grafana/grafana
|
||||
tag: 6.2.5
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
user: admin
|
||||
password:
|
||||
|
||||
# Istio Prometheus instance
|
||||
url: http://prometheus:9090
|
||||
|
||||
# Weave Cloud instance token
|
||||
token:
|
||||
@@ -1,22 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
@@ -1,21 +0,0 @@
|
||||
apiVersion: v1
|
||||
name: loadtester
|
||||
version: 0.11.0
|
||||
appVersion: 0.11.0
|
||||
kubeVersion: ">=1.11.0-0"
|
||||
engine: gotpl
|
||||
description: Flagger's load testing service, based on rakyll/hey and bojand/ghz, generates traffic during canary analysis when configured as a webhook.
|
||||
home: https://docs.flagger.app
|
||||
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
|
||||
sources:
|
||||
- https://github.com/weaveworks/flagger
|
||||
maintainers:
|
||||
- name: stefanprodan
|
||||
url: https://github.com/stefanprodan
|
||||
email: stefanprodan@users.noreply.github.com
|
||||
keywords:
|
||||
- canary
|
||||
- istio
|
||||
- appmesh
|
||||
- gitops
|
||||
- load testing
|
||||
@@ -1,78 +0,0 @@
# Flagger load testing service

[Flagger's](https://github.com/weaveworks/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.

## Prerequisites

* Kubernetes >= 1.11

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-loadtester`:

```console
helm upgrade -i flagger-loadtester flagger/loadtester
```

The command deploys the load tester on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should have Istio or App Mesh sidecar injection enabled

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-loadtester` deployment:

```console
helm delete --purge flagger-loadtester
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the load tester chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `weaveworks/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`serviceAccountName` | Kubernetes service account name | `none`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | Memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `{}`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
`meshName` | AWS App Mesh name | `none`
`backends` | AWS App Mesh virtual services | `none`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/loadtester --name flagger-loadtester
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
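As a usage note, the load tester is driven by Flagger through canary analysis webhooks. A minimal sketch of such a webhook, assuming the release is installed as `flagger-loadtester` in the `test` namespace and the application under test is `podinfo` listening on port 9898 (the podinfo chart later in this diff uses the same pattern):

```yaml
canaryAnalysis:
  webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        # hey generates HTTP GET traffic against the canary service for one minute
        cmd: "hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/"
```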
@@ -1 +0,0 @@
|
||||
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/
|
||||
@@ -1,32 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "loadtester.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "loadtester.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "loadtester.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
@@ -1,75 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "loadtester.name" . }}
|
||||
helm.sh/chart: {{ include "loadtester.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ include "loadtester.name" . }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ include "loadtester.name" . }}
|
||||
annotations:
|
||||
appmesh.k8s.aws/ports: "444"
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.serviceAccountName }}
|
||||
serviceAccountName: {{ .Values.serviceAccountName }}
|
||||
{{- else if .Values.rbac.create }}
|
||||
serviceAccountName: {{ include "loadtester.fullname" . }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
command:
|
||||
- ./loadtester
|
||||
- -port=8080
|
||||
- -log-level={{ .Values.logLevel }}
|
||||
- -timeout={{ .Values.cmd.timeout }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- wget
|
||||
- --quiet
|
||||
- --tries=1
|
||||
- --timeout=4
|
||||
- --spider
|
||||
- http://localhost:8080/healthz
|
||||
timeoutSeconds: 5
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,54 +0,0 @@
|
||||
---
|
||||
{{- if .Values.rbac.create }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- if eq .Values.rbac.scope "cluster" }}
|
||||
kind: ClusterRole
|
||||
{{- else }}
|
||||
kind: Role
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ template "loadtester.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "loadtester.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "loadtester.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
{{ toYaml .Values.rbac.rules | indent 2 }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- if eq .Values.rbac.scope "cluster" }}
|
||||
kind: ClusterRoleBinding
|
||||
{{- else }}
|
||||
kind: RoleBinding
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ template "loadtester.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "loadtester.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "loadtester.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- if eq .Values.rbac.scope "cluster" }}
|
||||
kind: ClusterRole
|
||||
{{- else }}
|
||||
kind: Role
|
||||
{{- end }}
|
||||
name: {{ template "loadtester.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "loadtester.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "loadtester.fullname" . }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "loadtester.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "loadtester.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
@@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "loadtester.name" . }}
|
||||
helm.sh/chart: {{ include "loadtester.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ include "loadtester.name" . }}
|
||||
@@ -1,27 +0,0 @@
|
||||
{{- if .Values.meshName }}
|
||||
apiVersion: appmesh.k8s.aws/v1beta1
|
||||
kind: VirtualNode
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "loadtester.name" . }}
|
||||
helm.sh/chart: {{ include "loadtester.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
meshName: {{ .Values.meshName }}
|
||||
listeners:
|
||||
- portMapping:
|
||||
port: 444
|
||||
protocol: http
|
||||
serviceDiscovery:
|
||||
dns:
|
||||
hostName: {{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}
|
||||
{{- if .Values.backends }}
|
||||
backends:
|
||||
{{- range .Values.backends }}
|
||||
- virtualService:
|
||||
virtualServiceName: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,54 +0,0 @@
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: weaveworks/flagger-loadtester
|
||||
tag: 0.11.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
podAnnotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
|
||||
logLevel: info
|
||||
cmd:
|
||||
timeout: 1h
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
rbac:
|
||||
# rbac.create: `true` if rbac resources should be created
|
||||
create: false
|
||||
# rbac.scope: `cluster` to create cluster-scope rbac resources (ClusterRole/ClusterRoleBinding)
|
||||
# otherwise, namespace-scope rbac resources will be created (Role/RoleBinding)
|
||||
scope:
|
||||
# rbac.rules: array of rules to apply to the role. example:
|
||||
# rules:
|
||||
# - apiGroups: [""]
|
||||
# resources: ["pods"]
|
||||
# verbs: ["list", "get"]
|
||||
rules: []
|
||||
|
||||
# name of an existing service account to use - if not creating rbac resources
|
||||
serviceAccountName: ""
|
||||
|
||||
# App Mesh virtual node settings
|
||||
meshName: ""
|
||||
#backends:
|
||||
# - app1.namespace
|
||||
# - app2.namespace
|
||||
@@ -1,21 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
@@ -1,12 +0,0 @@
|
||||
apiVersion: v1
|
||||
version: 3.1.0
|
||||
appVersion: 3.1.0
|
||||
name: podinfo
|
||||
engine: gotpl
|
||||
description: Flagger canary deployment demo chart
|
||||
home: https://flagger.app
|
||||
maintainers:
|
||||
- email: stefanprodan@users.noreply.github.com
|
||||
name: stefanprodan
|
||||
sources:
|
||||
- https://github.com/weaveworks/flagger
|
||||
@@ -1,79 +0,0 @@
# Podinfo

Podinfo is a tiny web application made with Go
that showcases best practices of running canary deployments with Flagger and Istio.

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `frontend`:

```console
helm upgrade -i frontend flagger/podinfo \
--namespace test \
--set nameOverride=frontend \
--set backend=http://backend.test:9898/echo \
--set canary.enabled=true \
--set canary.istioIngress.enabled=true \
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
--set canary.istioIngress.host=frontend.istio.example.com
```

To install the chart as `backend`:

```console
helm upgrade -i backend flagger/podinfo \
--namespace test \
--set nameOverride=backend \
--set canary.enabled=true
```

## Uninstalling the Chart

To uninstall/delete the `frontend` deployment:

```console
helm delete --purge frontend
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the podinfo chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `stefanprodan/podinfo`
`image.tag` | Image tag | `<VERSION>`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`hpa.enabled` | Enables HPA | `true`
`hpa.cpu` | Target CPU usage per pod | `80`
`hpa.memory` | Target memory usage per pod | `512Mi`
`hpa.minReplicas` | Minimum pod replicas | `2`
`hpa.maxReplicas` | Maximum pod replicas | `4`
`resources.requests/cpu` | Pod CPU request | `100m`
`resources.requests/memory` | Pod memory request | `32Mi`
`backend` | Backend URL | None
`faults.delay` | Random HTTP response delays between 0 and 5 seconds | `false`
`faults.error` | 1/3 chance of a random HTTP response error | `false`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/podinfo --name frontend \
--set=image.tag=1.4.1,hpa.enabled=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/podinfo --name frontend -f values.yaml
```
@@ -1 +0,0 @@
|
||||
podinfo {{ .Release.Name }} deployed!
|
||||
@@ -1,43 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "podinfo.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "podinfo.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "podinfo.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name suffix.
|
||||
*/}}
|
||||
{{- define "podinfo.suffix" -}}
|
||||
{{- if .Values.canary.enabled -}}
|
||||
{{- "-primary" -}}
|
||||
{{- else -}}
|
||||
{{- "" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,66 +0,0 @@
|
||||
{{- if .Values.canary.enabled }}
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
progressDeadlineSeconds: 60
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
service:
|
||||
port: {{ .Values.service.port }}
|
||||
{{- if .Values.canary.istioIngress.enabled }}
|
||||
gateways:
|
||||
- {{ .Values.canary.istioIngress.gateway }}
|
||||
hosts:
|
||||
- {{ .Values.canary.istioIngress.host }}
|
||||
{{- end }}
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: {{ .Values.canary.istioTLS }}
|
||||
canaryAnalysis:
|
||||
interval: {{ .Values.canary.analysis.interval }}
|
||||
threshold: {{ .Values.canary.analysis.threshold }}
|
||||
maxWeight: {{ .Values.canary.analysis.maxWeight }}
|
||||
stepWeight: {{ .Values.canary.analysis.stepWeight }}
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: {{ .Values.canary.thresholds.successRate }}
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: {{ .Values.canary.thresholds.latency }}
|
||||
interval: 1m
|
||||
webhooks:
|
||||
{{- if .Values.canary.helmtest.enabled }}
|
||||
- name: "helm test"
|
||||
type: pre-rollout
|
||||
url: {{ .Values.canary.helmtest.url }}
|
||||
timeout: 3m
|
||||
metadata:
|
||||
type: "helm"
|
||||
cmd: "test {{ .Release.Name }} --cleanup"
|
||||
{{- end }}
|
||||
{{- if .Values.canary.loadtest.enabled }}
|
||||
- name: load-test-get
|
||||
url: {{ .Values.canary.loadtest.url }}
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 2 http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}"
|
||||
- name: load-test-post
|
||||
url: {{ .Values.canary.loadtest.url }}
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 2 -m POST -d '{\"test\": true}' http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}/echo"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,15 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
data:
|
||||
config.yaml: |-
|
||||
# http settings
|
||||
http-client-timeout: 1m
|
||||
http-server-timeout: {{ .Values.httpServer.timeout }}
|
||||
http-server-shutdown-timeout: 5s
|
||||
@@ -1,99 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "podinfo.fullname" . }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "podinfo.fullname" . }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: podinfo
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port={{ .Values.service.port }}
|
||||
- --level={{ .Values.logLevel }}
|
||||
- --random-delay={{ .Values.faults.delay }}
|
||||
- --random-error={{ .Values.faults.error }}
|
||||
- --config-path=/podinfo/config
|
||||
{{- range .Values.backends }}
|
||||
- --backend-url={{ . }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- if .Values.message }}
|
||||
- name: PODINFO_UI_MESSAGE
|
||||
value: {{ .Values.message }}
|
||||
{{- end }}
|
||||
{{- if .Values.backend }}
|
||||
- name: PODINFO_BACKEND_URL
|
||||
value: {{ .Values.backend }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.port }}
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:{{ .Values.service.port }}/healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- podcli
|
||||
- check
|
||||
- http
|
||||
- localhost:{{ .Values.service.port }}/readyz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
- name: config
|
||||
mountPath: /podinfo/config
|
||||
readOnly: true
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
@@ -1,37 +0,0 @@
|
||||
{{- if .Values.hpa.enabled -}}
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
minReplicas: {{ .Values.hpa.minReplicas }}
|
||||
maxReplicas: {{ .Values.hpa.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.hpa.cpu }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: {{ .Values.hpa.cpu }}
|
||||
{{- end }}
|
||||
{{- if .Values.hpa.memory }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageValue: {{ .Values.hpa.memory }}
|
||||
{{- end }}
|
||||
{{- if .Values.hpa.requests }}
|
||||
- type: Pod
|
||||
pods:
|
||||
metricName: http_requests
|
||||
targetAverageValue: {{ .Values.hpa.requests }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
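Besides CPU and memory, the template above can emit a Pods metric on `http_requests` when `hpa.requests` is set, even though the default values omit it. A hedged values sketch enabling it; the target of 100 requests per pod is an arbitrary illustration and assumes a custom metrics adapter that exposes `http_requests`:

```yaml
hpa:
  enabled: true
  minReplicas: 2
  maxReplicas: 4
  cpu: 80
  memory: 512Mi
  requests: 100   # targetAverageValue for the http_requests pods metric
```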
@@ -1,20 +0,0 @@
|
||||
{{- if not .Values.canary.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "podinfo.name" . }}
|
||||
chart: {{ template "podinfo.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: {{ template "podinfo.fullname" . }}
|
||||
{{- end }}
|
||||
@@ -1,29 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}-jwt-test-{{ randAlphaNum 5 | lower }}
|
||||
labels:
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
app: {{ template "podinfo.name" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test-success
|
||||
sidecar.istio.io/inject: "false"
|
||||
linkerd.io/inject: disabled
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
|
||||
spec:
|
||||
containers:
|
||||
- name: tools
|
||||
image: giantswarm/tiny-tools
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
TOKEN=$(curl -sd 'test' ${PODINFO_SVC}/token | jq -r .token) &&
|
||||
curl -H "Authorization: Bearer ${TOKEN}" ${PODINFO_SVC}/token/validate | grep test
|
||||
env:
|
||||
- name: PODINFO_SVC
|
||||
value: {{ template "podinfo.fullname" . }}:{{ .Values.service.port }}
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
# Default values for podinfo.
|
||||
image:
|
||||
repository: stefanprodan/podinfo
|
||||
tag: 3.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
service:
|
||||
enabled: false
|
||||
type: ClusterIP
|
||||
port: 9898
|
||||
|
||||
hpa:
|
||||
enabled: true
|
||||
minReplicas: 2
|
||||
maxReplicas: 4
|
||||
cpu: 80
|
||||
memory: 512Mi
|
||||
|
||||
canary:
|
||||
enabled: false
|
||||
# Istio traffic policy tls can be DISABLE or ISTIO_MUTUAL
|
||||
istioTLS: DISABLE
|
||||
istioIngress:
|
||||
enabled: false
|
||||
# Istio ingress gateway name
|
||||
gateway: public-gateway.istio-system.svc.cluster.local
|
||||
# external host name, e.g. podinfo.example.com
|
||||
host:
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 15s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
thresholds:
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
successRate: 99
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
latency: 500
|
||||
loadtest:
|
||||
enabled: false
|
||||
# load tester address
|
||||
url: http://flagger-loadtester.test/
|
||||
helmtest:
|
||||
enabled: false
|
||||
# helm tester address
|
||||
url: http://flagger-helmtester.kube-system/
|
||||
|
||||
resources:
|
||||
limits:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 32Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
logLevel: info
|
||||
backend: #http://backend-podinfo:9898/echo
|
||||
backends: []
|
||||
message: #UI greetings
|
||||
|
||||
faults:
|
||||
delay: false
|
||||
error: false
|
||||
|
||||
httpServer:
|
||||
timeout: 30s
|
||||
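For context, the canary settings above are usually switched on per release rather than edited in place. A values-file equivalent of the README's `frontend` install example might look like this; the gateway and host are the placeholders used earlier in the README:

```yaml
# frontend-values.yaml -- illustrative override mirroring the README example
nameOverride: frontend
backend: http://backend.test:9898/echo
canary:
  enabled: true
  istioIngress:
    enabled: true
    gateway: public-gateway.istio-system.svc.cluster.local
    host: frontend.istio.example.com
```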
@@ -1,313 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
|
||||
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
|
||||
"github.com/weaveworks/flagger/pkg/controller"
|
||||
"github.com/weaveworks/flagger/pkg/logger"
|
||||
"github.com/weaveworks/flagger/pkg/metrics"
|
||||
"github.com/weaveworks/flagger/pkg/notifier"
|
||||
"github.com/weaveworks/flagger/pkg/router"
|
||||
"github.com/weaveworks/flagger/pkg/server"
|
||||
"github.com/weaveworks/flagger/pkg/signals"
|
||||
"github.com/weaveworks/flagger/pkg/version"
|
||||
"go.uber.org/zap"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
"k8s.io/client-go/transport"
|
||||
_ "k8s.io/code-generator/cmd/client-gen/generators"
|
||||
)
|
||||
|
||||
var (
|
||||
masterURL string
|
||||
kubeconfig string
|
||||
metricsServer string
|
||||
controlLoopInterval time.Duration
|
||||
logLevel string
|
||||
port string
|
||||
msteamsURL string
|
||||
slackURL string
|
||||
slackUser string
|
||||
slackChannel string
|
||||
threadiness int
|
||||
zapReplaceGlobals bool
|
||||
zapEncoding string
|
||||
namespace string
|
||||
meshProvider string
|
||||
selectorLabels string
|
||||
ingressAnnotationsPrefix string
|
||||
enableLeaderElection bool
|
||||
leaderElectionNamespace string
|
||||
ver bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
|
||||
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "8080", "Port to listen on.")
|
||||
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
|
||||
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
|
||||
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
|
||||
flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
|
||||
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
flag.StringVar(&namespace, "namespace", "", "Namespace that Flagger watches for canary objects.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, supergloo, nginx or smi.")
|
||||
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
|
||||
flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for ingresses.")
|
||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
|
||||
flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
|
||||
flag.BoolVar(&ver, "version", false, "Print version")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if ver {
|
||||
fmt.Println("Flagger version", version.VERSION, "revision ", version.REVISION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating logger: %v", err)
|
||||
}
|
||||
if zapReplaceGlobals {
|
||||
zap.ReplaceGlobals(logger.Desugar())
|
||||
}
|
||||
|
||||
defer logger.Sync()
|
||||
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
|
||||
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building kubeconfig: %v", err)
|
||||
}
|
||||
|
||||
kubeClient, err := kubernetes.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building kubernetes clientset: %v", err)
|
||||
}
|
||||
|
||||
meshClient, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building mesh clientset: %v", err)
|
||||
}
|
||||
|
||||
flaggerClient, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building flagger clientset: %s", err.Error())
|
||||
}
|
||||
|
||||
flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
|
||||
|
||||
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
|
||||
|
||||
logger.Infof("Starting flagger version %s revision %s mesh provider %s", version.VERSION, version.REVISION, meshProvider)
|
||||
|
||||
ver, err := kubeClient.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
logger.Fatalf("Error calling Kubernetes API: %v", err)
|
||||
}
|
||||
|
||||
k8sVersionConstraint := "^1.11.0"
|
||||
|
||||
// We append -alpha.1 to the end of our version constraint so that prebuilds of later versions
|
||||
// are considered valid for our purposes, as well as some managed solutions like EKS where they provide
|
||||
// a version like `v1.12.6-eks-d69f1b`. It doesn't matter what the prelease value is here, just that it
|
||||
// exists in our constraint.
|
||||
semverConstraint, err := semver.NewConstraint(k8sVersionConstraint + "-alpha.1")
|
||||
if err != nil {
|
||||
logger.Fatalf("Error parsing kubernetes version constraint: %v", err)
|
||||
}
|
||||
|
||||
k8sSemver, err := semver.NewVersion(ver.GitVersion)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error parsing kubernetes version as a semantic version: %v", err)
|
||||
}
|
||||
|
||||
if !semverConstraint.Check(k8sSemver) {
|
||||
logger.Fatalf("Unsupported version of kubernetes detected. Expected %s, got %v", k8sVersionConstraint, ver)
|
||||
}
|
||||
|
||||
labels := strings.Split(selectorLabels, ",")
|
||||
if len(labels) < 1 {
|
||||
logger.Fatalf("At least one selector label is required")
|
||||
}
|
||||
|
||||
logger.Infof("Connected to Kubernetes API %s", ver)
|
||||
if namespace != "" {
|
||||
logger.Infof("Watching namespace %s", namespace)
|
||||
}
|
||||
|
||||
observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building prometheus client: %s", err.Error())
|
||||
}
|
||||
|
||||
ok, err := observerFactory.Client.IsOnline()
|
||||
if ok {
|
||||
logger.Infof("Connected to metrics server %s", metricsServer)
|
||||
} else {
|
||||
logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
|
||||
}
|
||||
|
||||
// setup Slack or MS Teams notifications
|
||||
notifierClient := initNotifier(logger)
|
||||
|
||||
// start HTTP server
|
||||
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
|
||||
|
||||
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
|
||||
|
||||
c := controller.NewController(
|
||||
kubeClient,
|
||||
meshClient,
|
||||
flaggerClient,
|
||||
canaryInformer,
|
||||
controlLoopInterval,
|
||||
logger,
|
||||
notifierClient,
|
||||
routerFactory,
|
||||
observerFactory,
|
||||
meshProvider,
|
||||
version.VERSION,
|
||||
labels,
|
||||
)
|
||||
|
||||
flaggerInformerFactory.Start(stopCh)
|
||||
|
||||
logger.Info("Waiting for informer caches to sync")
|
||||
for _, synced := range []cache.InformerSynced{
|
||||
canaryInformer.Informer().HasSynced,
|
||||
} {
|
||||
if ok := cache.WaitForCacheSync(stopCh, synced); !ok {
|
||||
logger.Fatalf("Failed to wait for cache sync")
|
||||
}
|
||||
}
|
||||
|
||||
// leader election context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// prevents new requests when leadership is lost
|
||||
cfg.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))
|
||||
|
||||
// cancel leader election context on shutdown signals
|
||||
go func() {
|
||||
<-stopCh
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// wrap controller run
|
||||
runController := func() {
|
||||
if err := c.Run(threadiness, stopCh); err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// run controller when this instance wins the leader election
|
||||
if enableLeaderElection {
|
||||
ns := leaderElectionNamespace
|
||||
if namespace != "" {
|
||||
ns = namespace
|
||||
}
|
||||
startLeaderElection(ctx, runController, ns, kubeClient, logger)
|
||||
} else {
|
||||
runController()
|
||||
}
|
||||
}
|
||||
|
||||
func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient kubernetes.Interface, logger *zap.SugaredLogger) {
|
||||
configMapName := "flagger-leader-election"
|
||||
id, err := os.Hostname()
|
||||
if err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
id = id + "_" + string(uuid.NewUUID())
|
||||
|
||||
lock, err := resourcelock.New(
|
||||
resourcelock.ConfigMapsResourceLock,
|
||||
ns,
|
||||
configMapName,
|
||||
kubeClient.CoreV1(),
|
||||
kubeClient.CoordinationV1(),
|
||||
resourcelock.ResourceLockConfig{
|
||||
Identity: id,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error running controller: %v", err)
|
||||
}
|
||||
|
||||
logger.Infof("Starting leader election id: %s configmap: %s namespace: %s", id, configMapName, ns)
|
||||
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
|
||||
Lock: lock,
|
||||
ReleaseOnCancel: true,
|
||||
LeaseDuration: 60 * time.Second,
|
||||
RenewDeadline: 15 * time.Second,
|
||||
RetryPeriod: 5 * time.Second,
|
||||
Callbacks: leaderelection.LeaderCallbacks{
|
||||
OnStartedLeading: func(ctx context.Context) {
|
||||
logger.Info("Acting as elected leader")
|
||||
run()
|
||||
},
|
||||
OnStoppedLeading: func() {
|
||||
logger.Infof("Leadership lost")
|
||||
os.Exit(1)
|
||||
},
|
||||
OnNewLeader: func(identity string) {
|
||||
if identity != id {
|
||||
logger.Infof("Another instance has been elected as leader: %v", identity)
|
||||
}
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
provider := "slack"
|
||||
notifierURL := fromEnv("SLACK_URL", slackURL)
|
||||
if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
|
||||
provider = "msteams"
|
||||
notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
|
||||
}
|
||||
notifierFactory := notifier.NewFactory(notifierURL, slackUser, slackChannel)
|
||||
|
||||
if notifierURL != "" {
|
||||
var err error
|
||||
client, err = notifierFactory.Notifier(provider)
|
||||
if err != nil {
|
||||
logger.Errorf("Notifier %v", err)
|
||||
} else {
|
||||
logger.Infof("Notifications enabled for %s", notifierURL[0:30])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fromEnv(envVar string, defaultVal string) string {
|
||||
if os.Getenv(envVar) != "" {
|
||||
return os.Getenv(envVar)
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
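The `-enable-leader-election` and `-leader-election-namespace` flags above presumably map to the chart's `leaderElection` values shown earlier in this diff; running more than one Flagger replica only makes sense with the election enabled. A hedged values sketch for a highly available install:

```yaml
# values override for running two Flagger replicas (illustrative)
replicaCount: 2
leaderElection:
  enabled: true   # elects a single active controller among the replicas
```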
@@ -1,53 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"github.com/weaveworks/flagger/pkg/loadtester"
|
||||
"github.com/weaveworks/flagger/pkg/logger"
|
||||
"github.com/weaveworks/flagger/pkg/signals"
|
||||
"go.uber.org/zap"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
var VERSION = "0.11.0"
|
||||
var (
|
||||
logLevel string
|
||||
port string
|
||||
timeout time.Duration
|
||||
zapReplaceGlobals bool
|
||||
zapEncoding string
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "9090", "Port to listen on.")
|
||||
flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating logger: %v", err)
|
||||
}
|
||||
if zapReplaceGlobals {
|
||||
zap.ReplaceGlobals(logger.Desugar())
|
||||
}
|
||||
|
||||
defer logger.Sync()
|
||||
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
|
||||
taskRunner := loadtester.NewTaskRunner(logger, timeout)
|
||||
|
||||
go taskRunner.Start(100*time.Millisecond, stopCh)
|
||||
|
||||
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
|
||||
|
||||
gateStorage := loadtester.NewGateStorage("in-memory")
|
||||
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
education, socio-economic status, nationality, personal appearance, race,
|
||||
religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by contacting stefan.prodan(at)gmail.com.
|
||||
All complaints will be reviewed and investigated and will result in a response that is deemed
|
||||
necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of
|
||||
an incident. Further details of specific enforcement policies may be
|
||||
posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
docs/.vuepress/config.js (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
module.exports = {
|
||||
title: 'Flagger',
|
||||
description: 'Progressive Delivery operator for Kubernetes (Canary, A/B Testing and Blue/Green deployments)',
|
||||
themeConfig: {
|
||||
search: true,
|
||||
activeHeaderLinks: false,
|
||||
docsDir: 'docs',
|
||||
repo: 'fluxcd/flagger',
|
||||
nav: [
|
||||
{ text: 'Docs', link: 'https://docs.flagger.app' },
|
||||
{ text: 'Changelog', link: 'https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md' }
|
||||
]
|
||||
},
|
||||
head: [
|
||||
['link', { rel: 'icon', href: '/favicon.png' }],
|
||||
['link', { rel: 'stylesheet', href: '/website.css' }],
|
||||
['meta', { name: 'keywords', content: 'gitops kubernetes flagger flux istio linkerd appmesh contour gloo nginx skipper traefik' }],
|
||||
['meta', { name: 'twitter:card', content: 'summary_large_image' }],
|
||||
['meta', { name: 'twitter:title', content: 'Flagger' }],
|
||||
['meta', { name: 'twitter:description', content: 'Progressive delivery Kubernetes operator (Canary, A/B Testing and Blue/Green deployments)' }],
|
||||
['meta', { name: 'twitter:image:src', content: 'https://flagger.app/flagger-overview.png' }]
|
||||
]
|
||||
};
|
||||
Binary files added (not shown):
docs/.vuepress/public/cncf.png (24 KiB)
docs/.vuepress/public/favicon.png (7.5 KiB)
docs/.vuepress/public/flagger-gitops.png (188 KiB)
docs/.vuepress/public/flagger-overview.png (245 KiB)
Some files were not shown because too many files have changed in this diff.