Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-15 18:40:12 +00:00

Compare commits

29 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 4463d41744 | |
| | 66663c2aaf | |
| | d34a504516 | |
| | 9fc5f9ca31 | |
| | 3cf9fe7cb7 | |
| | 6d57b5c490 | |
| | 7254a5a7f6 | |
| | 671016834d | |
| | 41dc056c87 | |
| | 6757553685 | |
| | 1abceb8334 | |
| | 0939a1bcc9 | |
| | d91012d79c | |
| | 6e9fe2c584 | |
| | ccad05c92f | |
| | 8b9929008b | |
| | a881446252 | |
| | 183c73f2c4 | |
| | 3e3eaebbf2 | |
| | bcc5bbf6fa | |
| | 42c3052247 | |
| | 3593e20ad5 | |
| | 1f0819465b | |
| | 7e48d7537f | |
| | 45c69ce3f7 | |
| | 8699a07de9 | |
| | fb92d31744 | |
| | bb7c4b1909 | |
| | 84d74d1eb5 | |
@@ -1,3 +0,0 @@
|
||||
exemptions:
|
||||
- check: analytics
|
||||
reason: "We don't track people"
|
||||
14 .codecov.yml
@@ -1,14 +0,0 @@
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 50
        base: auto
    patch: off

comment:
  require_changes: true
  branches:
    - "!docs"
    - "!release"
@@ -1,19 +0,0 @@
|
||||
root: ./docs/gitbook
|
||||
|
||||
redirects:
|
||||
how-it-works: usage/how-it-works.md
|
||||
usage/progressive-delivery: tutorials/istio-progressive-delivery.md
|
||||
usage/ab-testing: tutorials/istio-ab-testing.md
|
||||
usage/blue-green: tutorials/kubernetes-blue-green.md
|
||||
usage/appmesh-progressive-delivery: tutorials/appmesh-progressive-delivery.md
|
||||
usage/linkerd-progressive-delivery: tutorials/linkerd-progressive-delivery.md
|
||||
usage/contour-progressive-delivery: tutorials/contour-progressive-delivery.md
|
||||
usage/gloo-progressive-delivery: tutorials/gloo-progressive-delivery.md
|
||||
usage/nginx-progressive-delivery: tutorials/nginx-progressive-delivery.md
|
||||
usage/skipper-progressive-delivery: tutorials/skipper-progressive-delivery.md
|
||||
usage/crossover-progressive-delivery: tutorials/crossover-progressive-delivery.md
|
||||
usage/traefik-progressive-delivery: tutorials/traefik-progressive-delivery.md
|
||||
usage/osm-progressive-delivery: tutorials/osm-progressive-delivery.md
|
||||
usage/kuma-progressive-delivery: tutorials/kuma-progressive-delivery.md
|
||||
usage/gatewayapi-progressive-delivery: tutorials/gatewayapi-progressive-delivery.md
|
||||
usage/apisix-progressive-delivery: tutorials/apisix-progressive-delivery.md
|
||||
1 .github/CODEOWNERS
@@ -1 +0,0 @@
* @stefanprodan
29 .github/ISSUE_TEMPLATE/bug_report.md
@@ -1,29 +0,0 @@
---
name: Bug report
about: Create a report to help us improve this project
title: ''
assignees: ''

---

### Describe the bug

A clear and concise description of what the bug is.
Please provide the Canary definition and Flagger logs.

### To Reproduce

<!--
Steps to reproduce the behaviour
-->

### Expected behavior

A clear and concise description of what you expected to happen.

### Additional context

- Flagger version:
- Kubernetes version:
- Service Mesh provider:
- Ingress provider:
2 .github/ISSUE_TEMPLATE/config.yml
@@ -1,2 +0,0 @@
blank_issues_enabled: true
19 .github/ISSUE_TEMPLATE/feature_request.md
@@ -1,19 +0,0 @@
---
name: Feature Request
about: I have a suggestion (and may want to implement it 🙂)!
title: ''
assignees: ''

---

## Describe the feature

What problem are you trying to solve?

### Proposed solution

What do you want to happen? Add any considered drawbacks.

### Any alternatives you've considered?

Is there another way to solve this problem that isn't as good a solution?
17 .github/dependabot.yml
@@ -1,17 +0,0 @@
version: 2

updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    labels: ["area/ci", "dependencies"]
    groups:
      # Group all updates together, so that they are all applied in a single PR.
      # Grouped updates are currently in beta and are subject to change.
      # xref: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
      ci:
        patterns:
          - "*"
    schedule:
      # By default, this will be on a Monday.
      interval: "weekly"
52 .github/workflows/build.yaml
@@ -1,52 +0,0 @@
name: build

on:
  workflow_dispatch:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main

permissions:
  contents: read

jobs:
  build-flagger:
    runs-on:
      group: "Default Larger Runners"
      labels: ubuntu-latest-16-cores
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
          cache-dependency-path: |
            **/go.sum
            **/go.mod
      - name: Download modules
        run: |
          go mod download
          go install golang.org/x/tools/cmd/goimports
      - name: Run linters
        run: make fmt test-codegen
      - name: Verify CRDs
        run: make verify-crd
      - name: Run tests
        run: go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
      - name: Check if working tree is dirty
        run: |
          if [[ $(git diff --stat) != '' ]]; then
            git --no-pager diff
            echo 'run make test and commit changes'
            exit 1
          fi
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          file: ./coverage.txt
      - name: Build container image
        run: docker build -t test/flagger:latest .
61 .github/workflows/e2e.yaml
@@ -1,61 +0,0 @@
name: e2e

on:
  workflow_dispatch:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main

permissions:
  contents: read

jobs:
  e2e-test:
    runs-on:
      group: "Default Larger Runners"
      labels: ubuntu-latest-16-cores
    strategy:
      fail-fast: false
      matrix:
        provider:
          # service mesh
          - istio
          - linkerd
          - kuma
          # ingress controllers
          - contour
          - nginx
          - traefik
          - gloo
          - skipper
          - kubernetes
          - gatewayapi
          - keda
          - apisix
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Kubernetes
        uses: helm/kind-action@v1.10.0
        if: matrix.provider != 'skipper'
        with:
          version: v0.23.0
          cluster_name: kind
          node_image: kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e
      - name: Setup Kubernetes for skipper
        uses: helm/kind-action@v1.10.0
        if: matrix.provider == 'skipper'
        with:
          version: v0.23.0
          cluster_name: kind
          node_image: kindest/node:v1.24.12@sha256:0bdca26bd7fe65c823640b14253ea7bac4baad9336b332c94850f84d8102f873
      - name: Build container image
        run: |
          docker build -t test/flagger:latest .
          kind load docker-image test/flagger:latest
      - name: Run tests
        run: |
          ./test/${{ matrix['provider'] }}/run.sh
20 .github/workflows/helm.yaml
@@ -1,20 +0,0 @@
name: helm

on:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  release-charts:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@v1.7.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_url: https://flagger.app
63 .github/workflows/push-ld.yml
@@ -1,63 +0,0 @@
name: push-ld
on:
  workflow_dispatch:

env:
  IMAGE: "ghcr.io/fluxcd/flagger-loadtester"

permissions:
  contents: read

jobs:
  release-load-tester:
    runs-on:
      group: "Default Larger Runners"
    permissions:
      id-token: write
      packages: write
    steps:
      - uses: actions/checkout@v4
      - uses: sigstore/cosign-installer@v3.5.0
      - name: Prepare
        id: prep
        run: |
          VERSION=$(grep 'VERSION' cmd/loadtester/main.go | head -1 | awk '{ print $4 }' | tr -d '"')
          echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3
      - name: Setup Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: fluxcdbot
          password: ${{ secrets.GHCR_TOKEN }}
      - name: Generate image meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ${{ env.IMAGE }}
          tags: |
            type=raw,value=${{ steps.prep.outputs.VERSION }}
      - name: Publish image
        id: build-push
        uses: docker/build-push-action@v5
        with:
          push: true
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile.loadtester
          platforms: linux/amd64,linux/arm64
          build-args: |
            REVISION=${{ github.sha }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      - name: Sign image
        env:
          COSIGN_EXPERIMENTAL: 1
        run: |
          cosign sign --yes ${{ env.IMAGE }}@${{ steps.build-push.outputs.digest }}
153 .github/workflows/release.yml
@@ -1,153 +0,0 @@
name: release
on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:
    inputs:
      tag:
        description: 'image tag prefix'
        default: 'rc'
        required: true

permissions:
  contents: read

env:
  IMAGE: "ghcr.io/fluxcd/${{ github.event.repository.name }}"

jobs:
  release-flagger:
    outputs:
      hashes: ${{ steps.slsa.outputs.hashes }}
    runs-on:
      group: "Default Larger Runners"
    permissions:
      contents: write # needed to write releases
      id-token: write # needed for keyless signing
      packages: write # needed for ghcr access
    steps:
      - uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - uses: fluxcd/flux2/action@main
      - uses: sigstore/cosign-installer@v3.5.0
      - name: Prepare
        id: prep
        run: |
          if [[ ${GITHUB_EVENT_NAME} = "workflow_dispatch" ]]; then
            VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}"
          else
            VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
          fi
          CHANGELOG="https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
          echo "[CHANGELOG](${CHANGELOG})" > notes.md
          echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3
      - name: Setup Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: fluxcdbot
          password: ${{ secrets.GHCR_TOKEN }}
      - name: Generate image meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ${{ env.IMAGE }}
          tags: |
            type=raw,value=${{ steps.prep.outputs.VERSION }}
      - name: Publish image
        id: build-push
        uses: docker/build-push-action@v5
        with:
          sbom: true
          provenance: true
          push: true
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          build-args: |
            REVISON=${{ github.sha }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      - name: Sign image
        env:
          COSIGN_EXPERIMENTAL: 1
        run: |
          cosign sign --yes ${{ env.IMAGE }}@${{ steps.build-push.outputs.digest }}
      - name: Publish signed manifests to GHCR
        if: startsWith(github.ref, 'refs/tags/v')
        env:
          COSIGN_EXPERIMENTAL: 1
        run: |
          OCI_URL=$(flux push artifact \
            oci://ghcr.io/fluxcd/flagger-manifests:${{ steps.prep.outputs.VERSION }} \
            --path="./kustomize" \
            --source="$(git config --get remote.origin.url)" \
            --revision="${{ steps.prep.outputs.VERSION }}/$(git rev-parse HEAD)" \
            --output json | \
            jq -r '. | .repository + "@" + .digest')
          cosign sign --yes ${OCI_URL}
      - name: Publish Helm charts
        if: startsWith(github.ref, 'refs/tags/v')
        uses: stefanprodan/helm-gh-pages@v1.7.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_url: https://flagger.app
          linting: off
      - uses: fluxcd/pkg/actions/helm@main
        with:
          version: 3.12.3
      - name: Publish signed Helm chart to GHCR
        if: startsWith(github.ref, 'refs/tags/v')
        env:
          COSIGN_EXPERIMENTAL: 1
        run: |
          helm package charts/flagger
          helm push flagger-${{ steps.prep.outputs.VERSION }}.tgz oci://ghcr.io/fluxcd/charts |& tee .digest
          cosign sign --yes ghcr.io/fluxcd/charts/flagger@$(cat .digest | awk -F "[, ]+" '/Digest/{print $NF}')
          rm flagger-${{ steps.prep.outputs.VERSION }}.tgz
          rm .digest
      - uses: anchore/sbom-action/download-syft@v0
      - name: Create release and SBOM
        id: run-goreleaser
        uses: goreleaser/goreleaser-action@v5
        if: startsWith(github.ref, 'refs/tags/v')
        with:
          version: latest
          args: release --release-notes=notes.md --rm-dist --skip-validate
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Generate SLSA metadata
        id: slsa
        if: startsWith(github.ref, 'refs/tags/v')
        env:
          ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
        run: |
          set -euo pipefail

          hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
          echo "hashes=$hashes" >> $GITHUB_OUTPUT

  release-provenance:
    needs: [release-flagger]
    if: startsWith(github.ref, 'refs/tags/v')
    permissions:
      actions: read # for detecting the Github Actions environment.
      id-token: write # for creating OIDC tokens for signing.
      contents: write # for uploading attestations to GitHub releases.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
    with:
      provenance-name: "provenance.intoto.jsonl"
      base64-subjects: "${{ needs.release-flagger.outputs.hashes }}"
      upload-assets: true
45 .github/workflows/scan.yml
@@ -1,45 +0,0 @@
name: scan

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  schedule:
    - cron: '18 10 * * 3'

permissions:
  contents: read

jobs:
  scan-fossa:
    runs-on: ubuntu-latest
    permissions:
      security-events: write
    steps:
      - uses: actions/checkout@v4
      - name: Run FOSSA scan and upload build data
        uses: fossa-contrib/fossa-action@v3
        with:
          # FOSSA Push-Only API Token
          fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
          github-token: ${{ github.token }}
  scan-codeql:
    runs-on: ubuntu-latest
    permissions:
      security-events: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: go
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
53 .github/workflows/website.yaml (new file)
@@ -0,0 +1,53 @@
name: website

on:
  push:
    branches:
      - website

jobs:
  vuepress:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - uses: actions/setup-node@v5
        with:
          node-version: '12.x'
      - id: yarn-cache-dir-path
        run: echo "::set-output name=dir::$(yarn cache dir)"
      - uses: actions/cache@v4
        id: yarn-cache
        with:
          path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-yarn-
      - name: Build
        run: |
          yarn add -D vuepress
          yarn docs:build
          mkdir $HOME/site
          rsync -avzh docs/.vuepress/dist/ $HOME/site
      - name: Publish
        env:
          REPOSITORY: "https://x-access-token:${{ secrets.BOT_GITHUB_TOKEN }}@github.com/fluxcd/flagger.git"
        run: |
          tmpDir=$(mktemp -d)
          pushd $tmpDir >& /dev/null

          git clone ${REPOSITORY}
          cd flagger
          git config user.name ${{ github.actor }}
          git config user.email ${{ github.actor }}@users.noreply.github.com
          git remote set-url origin ${REPOSITORY}
          git checkout gh-pages
          rm -rf assets
          rsync -avzh $HOME/site/ .
          rm -rf node_modules
          git add .
          git commit -m "Publish website"
          git push origin gh-pages

          popd >& /dev/null
          rm -rf $tmpDir
80 .gitignore
@@ -1,23 +1,65 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Test binary, build with `go test -c`
*.test
# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# next.js build output
.next

bin/
_tmp/

artifacts/gcloud/
.idea
Makefile.dev

vendor
coverage.txt
docs/.vuepress/dist/
Makefile.dev
@@ -1,31 +0,0 @@
|
||||
project_name: flagger
|
||||
|
||||
builds:
|
||||
- skip: true
|
||||
|
||||
release:
|
||||
prerelease: auto
|
||||
|
||||
source:
|
||||
enabled: true
|
||||
name_template: "{{ .ProjectName }}_{{ .Version }}_source_code"
|
||||
|
||||
sboms:
|
||||
- id: source
|
||||
artifacts: source
|
||||
documents:
|
||||
- "{{ .ProjectName }}_{{ .Version }}_sbom.spdx.json"
|
||||
|
||||
signs:
|
||||
- cmd: cosign
|
||||
env:
|
||||
- COSIGN_EXPERIMENTAL=1
|
||||
certificate: '${artifact}.pem'
|
||||
args:
|
||||
- sign-blob
|
||||
- "--yes"
|
||||
- '--output-certificate=${certificate}'
|
||||
- '--output-signature=${signature}'
|
||||
- '${artifact}'
|
||||
artifacts: checksum
|
||||
output: true
|
||||
2193 CHANGELOG.md
File diff suppressed because it is too large
@@ -1,3 +0,0 @@
|
||||
## Code of Conduct
|
||||
|
||||
Flagger follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
@@ -1,89 +0,0 @@
|
||||
# How to Contribute
|
||||
|
||||
Flagger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
|
||||
pull requests. This document outlines some of the conventions on development
|
||||
workflow, commit message formatting, contact points and other resources to make
|
||||
it easier to get your contribution accepted.
|
||||
|
||||
We gratefully welcome improvements to documentation as well as to code.
|
||||
|
||||
## Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution.
|
||||
|
||||
We require all commits to be signed. By signing off with your signature, you
|
||||
certify that you wrote the patch or otherwise have the right to contribute the
|
||||
material by the rules of the [DCO](DCO):
|
||||
|
||||
`Signed-off-by: Jane Doe <jane.doe@example.com>`
|
||||
|
||||
The signature must contain your real name
|
||||
(sorry, no pseudonyms or anonymous contributions)
|
||||
If your `user.name` and `user.email` are configured in your Git config,
|
||||
you can sign your commit automatically with `git commit -s`.
|
||||
|
||||
## Communications
|
||||
|
||||
The project uses Slack: To join the conversation, simply join the
|
||||
[CNCF](https://slack.cncf.io/) Slack workspace and use the
|
||||
[#flagger](https://cloud-native.slack.com/messages/flagger/) channel.
|
||||
|
||||
The developers use a mailing list to discuss development as well.
|
||||
Simply subscribe to [flux-dev on cncf.io](https://lists.cncf.io/g/cncf-flux-dev)
|
||||
to join the conversation (this will also add an invitation to your
|
||||
Google calendar for our [Flux
|
||||
meeting](https://docs.google.com/document/d/1l_M0om0qUEN_NNiGgpqJ2tvsF2iioHkaARDeh6b70B0/edit#)).
|
||||
|
||||
## Getting Started
|
||||
|
||||
- Fork the repository on GitHub
|
||||
- If you want to contribute as a developer, read [Flagger Development Guide](https://docs.flagger.app/dev/dev-guide)
|
||||
- If you have questions, concerns, get stuck or need a hand, let us know
|
||||
on the Slack channel. We are happy to help and look forward to having
|
||||
you part of the team. No matter in which capacity.
|
||||
- Play with the project, submit bugs, submit pull requests!
|
||||
|
||||
## Contribution workflow
|
||||
|
||||
This is a rough outline of how to prepare a contribution:
|
||||
|
||||
- Create a topic branch from where you want to base your work (usually branched from master).
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format (see below).
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- If you changed code:
|
||||
- add automated tests to cover your changes
|
||||
- Submit a pull request to the original repository.
|
||||
|
||||
## Acceptance policy
|
||||
|
||||
These things will make a PR more likely to be accepted:
|
||||
|
||||
- a well-described requirement
|
||||
- new code and tests follow the conventions in old code and tests
|
||||
- a good commit message (see below)
|
||||
- All code must abide [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
|
||||
- Names should abide [What's in a name](https://talks.golang.org/2014/names.slide#1)
|
||||
- Code must build on both Linux and Darwin, via plain `go build`
|
||||
- Code should have appropriate test coverage and tests should be written
|
||||
to work with `go test`
|
||||
|
||||
In general, we will merge a PR once one maintainer has endorsed it.
|
||||
For substantial changes, more people may become involved, and you might
|
||||
get asked to resubmit the PR or divide the changes into more than one PR.
|
||||
|
||||
### Format of the Commit Message
|
||||
|
||||
For Flagger we prefer the following rules for good commit messages:
|
||||
|
||||
- Limit the subject to 50 characters and write as the continuation
|
||||
of the sentence "If applied, this commit will ..."
|
||||
- Explain what and why in the body, if more than a trivial change;
|
||||
wrap it at 72 characters.
|
||||
|
||||
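A message following these rules might look like this (a made-up example, not taken from the repository history):

```
Add retry annotation to the generated canary service

Requests hitting the canary pods during a rolling restart were
failing hard, which skewed the analysis metrics. Retrying idempotent
requests smooths the success-rate check without masking real
regressions.

Signed-off-by: Jane Doe <jane.doe@example.com>
```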
The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.
36 DCO
@@ -1,36 +0,0 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
32 Dockerfile
@@ -1,32 +0,0 @@
FROM golang:1.22-alpine as builder

ARG TARGETPLATFORM
ARG REVISON

WORKDIR /workspace

# copy modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# cache modules
RUN go mod download

# copy source code
COPY cmd/ cmd/
COPY pkg/ pkg/

# build
RUN CGO_ENABLED=0 go build \
    -ldflags "-s -w -X github.com/fluxcd/flagger/pkg/version.REVISION=${REVISON}" \
    -a -o flagger ./cmd/flagger

FROM alpine:3.20

RUN apk --no-cache add ca-certificates

USER nobody

COPY --from=builder --chown=nobody:nobody /workspace/flagger .

ENTRYPOINT ["./flagger"]
@@ -1,73 +0,0 @@
|
||||
FROM golang:1.22-alpine as builder
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG TARGETARCH
|
||||
ARG REVISION
|
||||
|
||||
RUN apk --no-cache add alpine-sdk perl curl bash tar
|
||||
|
||||
RUN HELM3_VERSION=3.15.3 && \
|
||||
curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-${TARGETARCH}.tar.gz" | tar xvz && \
|
||||
chmod +x linux-${TARGETARCH}/helm && mv linux-${TARGETARCH}/helm /usr/local/bin/helm
|
||||
|
||||
RUN KUBECTL_VERSION=v1.29.7 && \
|
||||
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" && \
|
||||
chmod +x kubectl && mv kubectl /usr/local/bin/kubectl
|
||||
|
||||
RUN GRPC_HEALTH_PROBE_VERSION=v0.4.28 && \
|
||||
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-${TARGETARCH} && \
|
||||
chmod +x /usr/local/bin/grpc_health_probe
|
||||
|
||||
RUN GHZ_VERSION=0.120.0 && \
|
||||
curl -sSL "https://github.com/bojand/ghz/archive/refs/tags/v${GHZ_VERSION}.tar.gz" | tar xz -C /tmp && \
|
||||
cd /tmp/ghz-${GHZ_VERSION}/cmd/ghz && GOARCH=$TARGETARCH go build . && mv ghz /usr/local/bin && \
|
||||
chmod +x /usr/local/bin/ghz
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
# copy modules manifests
|
||||
COPY go.mod go.mod
|
||||
COPY go.sum go.sum
|
||||
|
||||
# cache modules
|
||||
RUN go mod download
|
||||
|
||||
# copy source code
|
||||
COPY cmd/ cmd/
|
||||
COPY pkg/ pkg/
|
||||
|
||||
# build
|
||||
RUN CGO_ENABLED=0 go build -o loadtester ./cmd/loadtester/*
|
||||
|
||||
FROM bash:5.0
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
RUN addgroup -S app && \
|
||||
adduser -S -g app app && \
|
||||
apk --no-cache add ca-certificates curl jq libgcc wrk hey git
|
||||
|
||||
WORKDIR /home/app
|
||||
|
||||
COPY --from=bats/bats:v1.1.0 /opt/bats/ /opt/bats/
|
||||
RUN ln -s /opt/bats/bin/bats /usr/local/bin/
|
||||
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/ghz /usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/grpc_health_probe /usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/
|
||||
|
||||
ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto
|
||||
|
||||
RUN chown -R app:app ./
|
||||
RUN chown -R app:app /tmp/ghz
|
||||
|
||||
USER app
|
||||
|
||||
# test load generator tools
|
||||
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
|
||||
RUN wrk -d 1s -c 1 -t 1 https://flagger.app > /dev/null && echo $? | grep 0
|
||||
|
||||
COPY --from=builder --chown=app:app /workspace/loadtester .
|
||||
|
||||
ENTRYPOINT ["./loadtester"]
|
||||
@@ -1,5 +0,0 @@
|
||||
# Flagger Governance
|
||||
|
||||
The Flagger project is governed by the [Flux governance document](https://github.com/fluxcd/community/blob/main/GOVERNANCE.md),
|
||||
involvement is defined in the [Flux community roles document](chttps://github.com/fluxcd/community/blob/main/community-roles.md),
|
||||
and processes can be found in the [Flux process document](https://github.com/fluxcd/community/blob/main/PROCESS.md).
|
||||
2 LICENSE
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]
   Copyright 2019 Weaveworks

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -1,9 +0,0 @@
|
||||
The maintainers are generally available in Slack at
|
||||
https://cloud-native.slack.com/messages/flagger/ (obtain an invitation
|
||||
at https://slack.cncf.io/).
|
||||
|
||||
In alphabetical order:
|
||||
|
||||
Sanskar Jaiswal, Independent <jaiswalsanskar078@gmail.com> (github: @aryan9600, slack: aryan9600)
|
||||
Stefan Prodan, ControlPlane <stefan.prodan@gmail.com> (github: @stefanprodan, slack: stefanprodan)
|
||||
Takeshi Yoneda, Tetrate <takeshi@tetrate.io> (github: @mathetake, slack: mathetake)
|
||||
57 Makefile
@@ -1,57 +0,0 @@
TAG?=latest
VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)

build:
	CGO_ENABLED=0 go build -a -o ./bin/flagger ./cmd/flagger

tidy:
	rm -f go.sum; go mod tidy -compat=1.22

vet:
	go vet ./...

fmt:
	go fmt ./...

codegen:
	./hack/update-codegen.sh

test-codegen:
	./hack/verify-codegen.sh

test: fmt test-codegen
	go test ./...

test-coverage: fmt test-codegen
	go test -coverprofile cover.out ./...
	go tool cover -html=cover.out
	rm cover.out

crd:
	cat artifacts/flagger/crd.yaml > charts/flagger/crds/crd.yaml
	cat artifacts/flagger/crd.yaml > kustomize/base/flagger/crd.yaml

verify-crd:
	./hack/verify-crd.sh

version-set:
	@next="$(TAG)" && \
	current="$(VERSION)" && \
	sed -i "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	sed -i "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
	sed -i "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
	echo "Version $$next set in code, deployment, chart and kustomize"

release:
	git tag "v$(VERSION)"
	git push origin "v$(VERSION)"

loadtester-build:
	docker build -t ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester

loadtester-push:
	docker push ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION)
280 README.md
@@ -1,280 +1,4 @@
# flagger
# flagger website

[](https://github.com/fluxcd/flagger/releases)
[](https://bestpractices.coreinfrastructure.org/projects/4783)
[](https://goreportcard.com/report/github.com/fluxcd/flagger)
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Ffluxcd%2Fflagger?ref=badge_shield)
[](https://artifacthub.io/packages/search?repo=flagger)
[](https://clomonitor.io/projects/cncf/flagger)
[flagger.app](https://flagger.app)

Flagger is a progressive delivery tool that automates the release process for applications running on Kubernetes.
It reduces the risk of introducing a new software version in production
by gradually shifting traffic to the new version while measuring metrics and running conformance tests.

[Flagger canary overview diagram]

Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
and integrates with various Kubernetes ingress controllers, service mesh, and monitoring solutions.

Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
and part of the [Flux](https://fluxcd.io) family of GitOps tools.

### Documentation

Flagger documentation can be found at [fluxcd.io/flagger](https://fluxcd.io/flagger/).

* Install
  * [Flagger install on Kubernetes](https://fluxcd.io/flagger/install/flagger-install-on-kubernetes)
* Usage
  * [How it works](https://fluxcd.io/flagger/usage/how-it-works)
  * [Deployment strategies](https://fluxcd.io/flagger/usage/deployment-strategies)
  * [Metrics analysis](https://fluxcd.io/flagger/usage/metrics)
  * [Webhooks](https://fluxcd.io/flagger/usage/webhooks)
  * [Alerting](https://fluxcd.io/flagger/usage/alerting)
  * [Monitoring](https://fluxcd.io/flagger/usage/monitoring)
* Tutorials
  * [App Mesh](https://fluxcd.io/flagger/tutorials/appmesh-progressive-delivery)
  * [Istio](https://fluxcd.io/flagger/tutorials/istio-progressive-delivery)
  * [Linkerd](https://fluxcd.io/flagger/tutorials/linkerd-progressive-delivery)
  * [Open Service Mesh (OSM)](https://fluxcd.io/flagger/tutorials/osm-progressive-delivery)
  * [Kuma Service Mesh](https://fluxcd.io/flagger/tutorials/kuma-progressive-delivery)
  * [Contour](https://fluxcd.io/flagger/tutorials/contour-progressive-delivery)
  * [Gloo](https://fluxcd.io/flagger/tutorials/gloo-progressive-delivery)
  * [NGINX Ingress](https://fluxcd.io/flagger/tutorials/nginx-progressive-delivery)
  * [Skipper](https://fluxcd.io/flagger/tutorials/skipper-progressive-delivery)
  * [Traefik](https://fluxcd.io/flagger/tutorials/traefik-progressive-delivery)
  * [Gateway API](https://fluxcd.io/flagger/tutorials/gatewayapi-progressive-delivery/)
  * [Kubernetes Blue/Green](https://fluxcd.io/flagger/tutorials/kubernetes-blue-green)

### Adopters

**Our list of production users has moved to <https://fluxcd.io/adopters/#flagger>**.

If you are using Flagger, please
[submit a PR to add your organization](https://github.com/fluxcd/website/blob/main/data/adopters/2-flagger.yaml) to the list!

### Canary CRD

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, service mesh, or ingress routes).
These objects expose the application on the mesh and drive the canary analysis and promotion.

Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are synchronized.

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # service mesh provider (optional)
  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, contour, gloo, supergloo, traefik, osm
  # for SMI TrafficSplit can be: smi:v1alpha1, smi:v1alpha2, smi:v1alpha3
  provider: istio
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # service name (defaults to targetRef.name)
    name: podinfo
    # ClusterIP port number
    port: 9898
    # container port name or number (optional)
    targetPort: 9898
    # port name can be http or grpc (default http)
    portName: http
    # add all the other container ports
    # to the ClusterIP services (default false)
    portDiscovery: true
    # HTTP match conditions (optional)
    match:
      - uri:
          prefix: /
    # HTTP rewrite (optional)
    rewrite:
      uri: /
    # request timeout (optional)
    timeout: 5s
  # promote the canary without analysing it (default false)
  skipAnalysis: false
  # define the canary analysis timing and KPIs
  analysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # validation (optional)
    metrics:
      - name: request-success-rate
        # builtin Prometheus check
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        # builtin Prometheus check
        # maximum req duration P99
        # milliseconds
        thresholdRange:
          max: 500
        interval: 30s
      - name: "database connections"
        # custom metric check
        templateRef:
          name: db-connections
        thresholdRange:
          min: 2
          max: 100
        interval: 1m
    # testing (optional)
    webhooks:
      - name: "conformance test"
        type: pre-rollout
        url: http://flagger-helmtester.test/
        timeout: 5m
        metadata:
          type: "helmv3"
          cmd: "test run podinfo -n test"
      - name: "load test"
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
    # alerting (optional)
    alerts:
      - name: "dev team Slack"
        severity: error
        providerRef:
          name: dev-slack
          namespace: flagger
      - name: "qa team Discord"
        severity: warn
        providerRef:
          name: qa-discord
      - name: "on-call MS Teams"
        severity: info
        providerRef:
          name: on-call-msteams
```
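The `db-connections` check above references a metric template defined in a separate resource. A minimal sketch of what such a MetricTemplate could look like, where the Prometheus address and the MySQL connections gauge are assumptions for illustration only:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  # must match the templateRef in the Canary; same namespace when
  # templateRef.namespace is omitted
  name: db-connections
  namespace: test
spec:
  provider:
    type: prometheus
    # assumed in-cluster Prometheus address
    address: http://prometheus.monitoring:9090
  # hypothetical PromQL query; any expression returning a single
  # scalar can back a thresholdRange check
  query: |
    avg(mysql_global_status_threads_connected{job="mysql"})
```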
For more details on how the canary analysis and promotion works please [read the docs](https://fluxcd.io/flagger/usage/how-it-works).

### Features

**Service Mesh**

| Feature | App Mesh | Istio | Linkerd | Kuma | OSM | Kubernetes CNI |
|--------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

**Ingress**

| Feature | Contour | Gloo | NGINX | Skipper | Traefik | Apache APISIX |
|-------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

**Networking Interface**

| Feature | Gateway API | SMI |
|-----------------------------------------------|--------------------|--------------------|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_minus_sign: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_minus_sign: | :heavy_minus_sign: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: |

For all [Gateway API](https://gateway-api.sigs.k8s.io/) implementations like
[Contour](https://projectcontour.io/guides/gateway-api/) or
[Istio](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/)
and [SMI](https://smi-spec.io) compatible service mesh solutions like
[Nginx Service Mesh](https://docs.nginx.com/nginx-service-mesh/),
[Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus)
can be used to implement the request success rate and request duration checks.
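As a sketch of that approach, a request success rate check can be expressed as a Prometheus MetricTemplate along these lines; the Prometheus address and the Envoy metric and label names are assumptions that depend on the mesh and monitoring setup in use:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: success-rate
  namespace: flagger-system
spec:
  provider:
    type: prometheus
    # assumed Prometheus address
    address: http://prometheus.monitoring:9090
  # percentage of non-5xx responses for the canary workload;
  # {{ namespace }}, {{ target }} and {{ interval }} are Flagger
  # template variables, while the envoy_cluster_* names are
  # assumptions tied to an Envoy-based data plane
  query: |
    sum(rate(
      envoy_cluster_upstream_rq{
        envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9]+",
        envoy_response_code!~"5.*"
      }[{{ interval }}]
    ))
    /
    sum(rate(
      envoy_cluster_upstream_rq{
        envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9]+"
      }[{{ interval }}]
    )) * 100
```

A Canary would then reference it via `templateRef` with a `thresholdRange` of `min: 99`, the same shape as the built-in checks in the example above.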
### Roadmap

#### [GitOps Toolkit](https://github.com/fluxcd/flux2) compatibility

- Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
- Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
- Make Flagger emit Kubernetes events compatible with the Flux v2 notification API
- Integrate Flagger into Flux v2 as the progressive delivery component

#### Integrations

- Add support for ingress controllers like HAProxy, ALB, and Apache APISIX
- Add support for Knative Serving

### Contributing

Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
To start contributing please read the [development guide](https://docs.flagger.app/dev/dev-guide).

When submitting bug reports please include as many details as possible:

- which Flagger version
- which Kubernetes version
- what configuration (canary, ingress and workloads definitions)
- what happened (Flagger and Proxy logs)

### Communication

Here is a list of good entry points into our community, how we stay in touch and how you can meet us as a team.

- Slack: Join in and talk to us in the `#flagger` channel on [CNCF Slack](https://slack.cncf.io/).
- Public meetings: We run weekly meetings - join one of the upcoming dev meetings from the [Flux calendar](https://fluxcd.io/#calendar).
- Blog: Stay up to date with the latest news on [the Flux blog](https://fluxcd.io/blog/).
- Mailing list: To be updated on Flux and Flagger progress regularly, please [join the flux-dev mailing list](https://lists.cncf.io/g/cncf-flux-dev).

#### Subscribing to the flux-dev calendar

To add the meetings to your calendar (e.g. Google Calendar):

1. visit the [Flux calendar](https://lists.cncf.io/g/cncf-flux-dev/calendar)
2. click on "Subscribe to Calendar" at the very bottom of the page
3. copy the iCalendar URL
4. open your calendar application, e.g. Google Calendar
5. find the "add calendar" option
6. choose "add by URL"
7. paste the iCalendar URL (ends with `.ics`)
8. done
@@ -1,62 +0,0 @@
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: appmesh
|
||||
progressDeadlineSeconds: 600
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
port: 80
|
||||
targetPort: 9898
|
||||
meshName: global
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
retryOn: "gateway-error,client-error,stream-error"
|
||||
timeout: 35s
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
analysis:
|
||||
interval: 15s
|
||||
threshold: 10
|
||||
iterations: 10
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "insider"
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: conformance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: "bash"
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'X-Canary: insider' http://podinfo-canary.test/"
|
||||
@@ -1,59 +0,0 @@
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: appmesh
|
||||
progressDeadlineSeconds: 600
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
port: 80
|
||||
targetPort: http
|
||||
meshName: global
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
retryOn: "gateway-error,client-error,stream-error"
|
||||
timeout: 35s
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
analysis:
|
||||
interval: 15s
|
||||
threshold: 10
|
||||
maxWeight: 50
|
||||
stepWeight: 5
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: conformance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: "bash"
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
|
||||
@@ -1,70 +0,0 @@
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: istio
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
name: podinfo
|
||||
port: 80
|
||||
targetPort: 9898
|
||||
portName: http
|
||||
portDiscovery: true
|
||||
gateways:
|
||||
- istio-system/public-gateway
|
||||
- mesh
|
||||
hosts:
|
||||
- app.example.com
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
timeout: 30s
|
||||
analysis:
|
||||
interval: 15s
|
||||
threshold: 10
|
||||
iterations: 10
|
||||
match:
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(type=insider)(;.*)?$"
|
||||
- headers:
|
||||
user-agent:
|
||||
regex: ".*Firefox.*"
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: conformance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: "bash"
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test/"
|
||||
@@ -1,66 +0,0 @@
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: istio
|
||||
progressDeadlineSeconds: 600
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
name: podinfo
|
||||
port: 80
|
||||
targetPort: 9898
|
||||
portName: http
|
||||
portDiscovery: true
|
||||
gateways:
|
||||
- istio-system/public-gateway
|
||||
- mesh
|
||||
hosts:
|
||||
- app.example.com
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
timeout: 30s
|
||||
skipAnalysis: false
|
||||
analysis:
|
||||
interval: 15s
|
||||
threshold: 10
|
||||
maxWeight: 50
|
||||
stepWeight: 5
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: conformance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: "bash"
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
|
||||
@@ -1,50 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
  annotations:
    kuma.io/mesh: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    targetPort: 9898
    apex:
      annotations:
        9898.service.kuma.io/protocol: "http"
    canary:
      annotations:
        9898.service.kuma.io/protocol: "http"
    primary:
      annotations:
        9898.service.kuma.io/protocol: "http"
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeight: 10
    metrics:
    - name: request-success-rate
      threshold: 99
      interval: 1m
    - name: request-duration
      threshold: 500
      interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
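Note that this Kuma example uses the older scalar `threshold` form for the built-in metric checks. A sketch of the equivalent `thresholdRange` form used by the other manifests in this diff:

```yaml
    metrics:
    - name: request-success-rate
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      thresholdRange:
        max: 500
      interval: 30s
```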
@@ -1,51 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: linkerd
  progressDeadlineSeconds: 600
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    name: podinfo
    port: 80
    targetPort: 9898
    portName: http
    portDiscovery: true
  skipAnalysis: false
  analysis:
    interval: 15s
    threshold: 10
    stepWeights: [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
    metrics:
    - name: request-success-rate
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      thresholdRange:
        max: 500
      interval: 30s
    webhooks:
      - name: conformance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: "bash"
          cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
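On Linkerd, Flagger shifts traffic through SMI TrafficSplits (the ClusterRole later in this diff grants access to `trafficsplits.split.smi-spec.io`), and `stepWeights` pins the exact promotion schedule, one listed value per interval. To inspect the weights Flagger sets during a run, something like:

```console
$ kubectl -n test get trafficsplit podinfo -o yaml
```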
@@ -1,52 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: linkerd
  progressDeadlineSeconds: 600
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    name: podinfo
    port: 80
    targetPort: 9898
    portName: http
    portDiscovery: true
  skipAnalysis: false
  analysis:
    interval: 15s
    threshold: 10
    maxWeight: 50
    stepWeight: 5
    metrics:
    - name: request-success-rate
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      thresholdRange:
        max: 500
      interval: 30s
    webhooks:
      - name: conformance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: "bash"
          cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
@@ -1,42 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: osm
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 600
  service:
    port: 9898
    targetPort: 9898
  analysis:
    interval: 15s
    threshold: 10
    stepWeights: [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
    metrics:
    - name: request-success-rate
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      thresholdRange:
        max: 500
      interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
@@ -1,43 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: osm
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 600
  service:
    port: 9898
    targetPort: 9898
  analysis:
    interval: 15s
    threshold: 10
    maxWeight: 50
    stepWeight: 5
    metrics:
    - name: request-success-rate
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      thresholdRange:
        max: 500
      interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
@@ -1,272 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flagger
  namespace: default
  labels:
    app: flagger
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flagger
  labels:
    app: flagger
rules:
  - apiGroups:
      - ""
    resources:
      - events
      - configmaps
      - configmaps/finalizers
      - secrets
      - secrets/finalizers
      - services
      - services/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - leases
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - apps
    resources:
      - daemonsets
      - daemonsets/finalizers
      - deployments
      - deployments/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - autoscaling
    resources:
      - horizontalpodautoscalers
      - horizontalpodautoscalers/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
      - ingresses/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - flagger.app
    resources:
      - canaries
      - canaries/status
      - canaries/finalizers
      - metrictemplates
      - metrictemplates/status
      - alertproviders
      - alertproviders/status
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - networking.istio.io
    resources:
      - virtualservices
      - virtualservices/finalizers
      - destinationrules
      - destinationrules/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - appmesh.k8s.aws
    resources:
      - virtualnodes
      - virtualnodes/finalizers
      - virtualrouters
      - virtualrouters/finalizers
      - virtualservices
      - virtualservices/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - split.smi-spec.io
    resources:
      - trafficsplits
      - trafficsplits/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - specs.smi-spec.io
    resources:
      - httproutegroups
      - httproutegroups/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - gloo.solo.io
    resources:
      - upstreams
      - upstreams/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - gateway.solo.io
    resources:
      - routetables
      - routetables/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - projectcontour.io
    resources:
      - httpproxies
      - httpproxies/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - kuma.io
    resources:
      - trafficroutes
      - trafficroutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - httproutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - keda.sh
    resources:
      - scaledobjects
      - scaledobjects/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - apisix.apache.org
    resources:
      - apisixroutes
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - nonResourceURLs:
      - /version
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger
  labels:
    app: flagger
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flagger
subjects:
  - kind: ServiceAccount
    name: flagger
    namespace: default
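A quick way to spot-check the ClusterRole above after applying it, using impersonation (assuming the manifests are applied as-is, with the service account in `default`):

```console
$ kubectl auth can-i create canaries.flagger.app \
    --as=system:serviceaccount:default:flagger
yes
```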
File diff suppressed because it is too large
@@ -1,62 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
  namespace: default
  labels:
    app: flagger
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: flagger
  template:
    metadata:
      labels:
        app: flagger
      annotations:
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: flagger
      containers:
        - name: flagger
          image: ghcr.io/fluxcd/flagger:1.38.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./flagger
            - -log-level=info
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=2
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=2
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            limits:
              memory: "512Mi"
              cpu: "1000m"
            requests:
              memory: "32Mi"
              cpu: "10m"
          securityContext:
            readOnlyRootFilesystem: true
            runAsUser: 10001
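Once the manifests above are applied, the controller can be verified against the same `/healthz` endpoint the probes use, for example:

```console
$ kubectl -n default rollout status deployment/flagger
$ kubectl -n default logs deploy/flagger | head
```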
@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -1,29 +0,0 @@
apiVersion: v1
name: flagger
version: 1.38.0
appVersion: 1.38.0
kubeVersion: ">=1.19.0-0"
engine: gotpl
description: Flagger is a progressive delivery operator for Kubernetes
home: https://flagger.app
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
sources:
  - https://github.com/fluxcd/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - flagger
  - istio
  - appmesh
  - linkerd
  - kuma
  - osm
  - smi
  - gloo
  - contour
  - nginx
  - traefik
  - gitops
  - canary
@@ -1,201 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -1,207 +0,0 @@
# Flagger

[Flagger](https://github.com/fluxcd/flagger) is a progressive delivery tool that automates the release process
for applications running on Kubernetes. It reduces the risk of introducing a new software version in production
by gradually shifting traffic to the new version while measuring metrics and running conformance tests.

Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
and integrates with various Kubernetes ingress controllers, service meshes and monitoring solutions.

Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
and part of the [Flux](https://fluxcd.io) family of GitOps tools.

## Prerequisites

* Kubernetes >= 1.19

## Installing the Chart

Add the Flagger Helm repository:

```console
$ helm repo add flagger https://flagger.app
```

Install Flagger's custom resource definitions:

```console
$ kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
```

To install Flagger for **Istio**:

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set meshProvider=istio \
--set metricsServer=http://prometheus:9090
```

To install Flagger for **Linkerd** (requires the Linkerd Viz extension):

```console
# Note that linkerdAuthPolicy.create=true is only required for Linkerd 2.12 and
# later
$ helm upgrade -i flagger flagger/flagger \
--namespace=flagger-system \
--set meshProvider=linkerd \
--set metricsServer=http://prometheus.linkerd-viz:9090 \
--set linkerdAuthPolicy.create=true
```

To install Flagger for **AWS App Mesh**:

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh:v1beta2 \
--set metricsServer=http://appmesh-prometheus:9090
```

To install Flagger for **Open Service Mesh** (requires OSM to have been installed with Prometheus):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=osm-system \
--set meshProvider=osm \
--set metricsServer=http://osm-prometheus.osm-system.svc:7070
```

To install Flagger for **Kuma Service Mesh** (requires Kuma to have been installed with Prometheus):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=kuma-system \
--set meshProvider=kuma \
--set metricsServer=http://prometheus-server.kuma-metrics:80
```

To install Flagger and Prometheus for **NGINX** Ingress (requires controller metrics enabled):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=ingress-nginx \
--set meshProvider=nginx \
--set prometheus.install=true
```

To install Flagger and Prometheus for **Gloo** (no longer requires Gloo discovery):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=gloo-system \
--set meshProvider=gloo \
--set prometheus.install=true
```

To install Flagger and Prometheus for **Contour**:

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=projectcontour \
--set meshProvider=contour \
--set ingressClass=contour \
--set prometheus.install=true
```

To install Flagger and Prometheus for **Traefik**:

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=traefik \
--set prometheus.install=true \
--set meshProvider=traefik
```

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger` deployment:

```console
$ helm delete flagger
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Flagger chart and their default values.

| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger` |
| `image.tag` | Image tag | `<VERSION>` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `logLevel` | Log level | `info` |
| `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090` |
| `prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the cluster | `false` |
| `prometheus.retention` | Prometheus data retention | `2h` |
| `selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name` |
| `serviceMonitor.enabled` | If `true`, creates service and serviceMonitor for monitoring Flagger metrics | `false` |
| `serviceMonitor.honorLabels` | If `true`, label conflicts are resolved by keeping label values from the scraped data and ignoring the conflicting server-side labels | `false` |
| `serviceMonitor.namespace` | Namespace the ServiceMonitor is installed in | the same namespace |
| `serviceMonitor.labels` | Labels for the ServiceMonitor passed to Prometheus Operator | `{}` |
| `configTracking.enabled` | If `true`, Flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true` |
| `eventWebhook` | If set, Flagger will publish events to the given webhook | None |
| `slack.url` | Slack incoming webhook | None |
| `slack.proxyUrl` | Slack proxy url | None |
| `slack.channel` | Slack channel | None |
| `slack.user` | Slack username | `flagger` |
| `msteams.url` | Microsoft Teams incoming webhook | None |
| `msteams.proxyUrl` | Microsoft Teams proxy url | None |
| `clusterName` | When specified, Flagger will add the cluster name to alerts | `""` |
| `podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false` |
| `podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace |
| `podMonitor.interval` | Interval at which metrics should be scraped | `15s` |
| `podMonitor.additionalLabels` | Additional labels to add to the PodMonitor | `{}` |
| `podMonitor.honorLabels` | If `true`, label conflicts are resolved by keeping label values from the scraped data and ignoring the conflicting server-side labels | `false` |
| `leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false` |
| `leaderElection.replicaCount` | Number of replicas | `1` |
| `serviceAccount.create` | If `true`, Flagger will create a service account | `true` |
| `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` |
| `serviceAccount.annotations` | Annotations for service account | `{}` |
| `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` |
| `includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""` |
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
| `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` |
| `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` |
| `resources.requests/cpu` | Pod CPU request | `10m` |
| `resources.requests/memory` | Pod memory request | `32Mi` |
| `resources.limits/cpu` | Pod CPU limit | `1000m` |
| `resources.limits/memory` | Pod memory limit | `512Mi` |
| `affinity` | Node/pod affinities | prefer spread across hosts |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `threadiness` | Number of controller workers | `2` |
| `tolerations` | List of node taints to tolerate | `[]` |
| `controlplane.kubeconfig.secretName` | The name of the Kubernetes secret containing the service mesh control plane kubeconfig | None |
| `controlplane.kubeconfig.key` | The name of the Kubernetes secret data key that contains the service mesh control plane kubeconfig | `kubeconfig` |
| `ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None |
| `podPriorityClassName` | PriorityClass name for pod priority configuration | `""` |
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
| `noCrossNamespaceRefs` | If `true`, cross namespace references to custom resources will be disabled | `false` |
| `namespace` | When specified, Flagger will restrict itself to watching Canary objects from that namespace | `""` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace flagger-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
-f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
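For reference, a minimal `values.yaml` for the Istio install above might look like the following (the Slack webhook URL is a placeholder):

```yaml
meshProvider: istio
metricsServer: http://prometheus.istio-system:9090
slack:
  url: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
  channel: general
```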
File diff suppressed because it is too large
@@ -1 +0,0 @@
Flagger installed
@@ -1,42 +0,0 @@
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
{{- define "flagger.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flagger.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "flagger.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
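These helpers drive all resource names: `flagger.fullname` collapses to the bare release name whenever the release name already contains the chart name, so `helm upgrade -i flagger flagger/flagger` yields resources named plain `flagger`. A sketch for inspecting the rendered names (assuming the `flagger` repo has been added as in the README):

```console
$ helm template flagger flagger/flagger | grep 'name: flagger' | head
```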
@@ -1,16 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "flagger.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- if .Values.serviceAccount.annotations }}
  annotations:
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
{{- end }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -1,16 +0,0 @@
{{- if .Values.linkerdAuthPolicy.create }}
apiVersion: policy.linkerd.io/v1alpha1
kind: AuthorizationPolicy
metadata:
  namespace: {{ .Values.linkerdAuthPolicy.namespace }}
  name: prometheus-admin-flagger
spec:
  targetRef:
    group: policy.linkerd.io
    kind: Server
    name: prometheus-admin
  requiredAuthenticationRefs:
    - kind: ServiceAccount
      name: {{ template "flagger.serviceAccountName" . }}
      namespace: {{ .Release.Namespace }}
{{- end }}
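This is the template behind the `linkerdAuthPolicy.create=true` flag in the README's Linkerd install: it authorizes Flagger's service account against the `prometheus-admin` Server in the Viz namespace. A sketch for rendering it in isolation:

```console
$ helm template flagger flagger/flagger \
    --set linkerdAuthPolicy.create=true \
    --set linkerdAuthPolicy.namespace=linkerd-viz | grep -A 12 'kind: AuthorizationPolicy'
```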
@@ -1,6 +0,0 @@
{{- if .Values.crd.create -}}
{{- range $path, $bytes := .Files.Glob "crds/*.yaml" -}}
{{ $.Files.Get $path }}
---
{{- end -}}
{{- end -}}
@@ -1,184 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flagger.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/version: {{ .Chart.AppVersion }}
  {{- with .Values.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.leaderElection.replicaCount }}
  {{- if eq .Values.leaderElection.enabled false }}
  strategy:
    type: Recreate
  {{- end }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "flagger.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
        app.kubernetes.io/version: {{ .Chart.AppVersion }}
        {{- if .Values.podLabels }}
        {{- range $key, $value := .Values.podLabels }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
        {{- end }}
      annotations:
      {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}
      {{- if .Values.affinity }}
      affinity:
        {{- tpl (toYaml .Values.affinity) . | nindent 8 }}
      {{- end }}
      {{- if .Values.image.pullSecret }}
      imagePullSecrets:
        - name: {{ .Values.image.pullSecret }}
      {{- end }}
      {{- if .Values.controlplane.kubeconfig.secretName }}
      volumes:
        - name: kubeconfig
          secret:
            secretName: "{{ .Values.controlplane.kubeconfig.secretName }}"
      {{- end }}
      {{- if .Values.additionalVolumes }}
        {{- toYaml .Values.additionalVolumes | nindent 8 -}}
      {{- end }}
      {{- if .Values.podPriorityClassName }}
      priorityClassName: {{ .Values.podPriorityClassName }}
      {{- end }}
      containers:
        - name: flagger
          {{- if .Values.securityContext.enabled }}
          securityContext:
{{ toYaml .Values.securityContext.context | indent 12 }}
          {{- end }}
          {{- if .Values.controlplane.kubeconfig.secretName }}
          volumeMounts:
            - name: kubeconfig
              mountPath: "/tmp/controlplane"
          {{- end }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./flagger
            - -log-level={{ .Values.logLevel }}
            {{- if .Values.meshProvider }}
            - -mesh-provider={{ .Values.meshProvider }}
            {{- end }}
            {{- if .Values.prometheus.install }}
            - -metrics-server=http://{{ template "flagger.fullname" . }}-prometheus:9090
            {{- else }}
            - -metrics-server={{ .Values.metricsServer }}
            {{- end }}
            {{- if .Values.selectorLabels }}
            - -selector-labels={{ .Values.selectorLabels }}
            {{- end }}
            {{- if .Values.configTracking }}
            - -enable-config-tracking={{ .Values.configTracking.enabled }}
            {{- end }}
            {{- if .Values.namespace }}
            - -namespace={{ .Values.namespace }}
            {{- end }}
            {{- if .Values.slack.url }}
            - -slack-url={{ .Values.slack.url }}
            {{- end }}
            {{- if .Values.slack.proxyUrl }}
            - -slack-proxy-url={{ .Values.slack.proxyUrl }}
            {{- end }}
            {{- if .Values.slack.user }}
            - -slack-user={{ .Values.slack.user }}
            {{- end }}
            {{- if .Values.slack.channel }}
            - -slack-channel={{ .Values.slack.channel }}
            {{- end }}
            {{- if .Values.msteams.url }}
            - -msteams-url={{ .Values.msteams.url }}
            {{- end }}
            {{- if .Values.msteams.proxyUrl }}
            - -msteams-proxy-url={{ .Values.msteams.proxyUrl }}
            {{- end }}
            {{- if .Values.leaderElection.enabled }}
            - -enable-leader-election=true
            - -leader-election-namespace={{ .Release.Namespace }}
            {{- end }}
            {{- if .Values.ingressAnnotationsPrefix }}
            - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
            {{- end }}
            {{- if .Values.includeLabelPrefix }}
            - -include-label-prefix={{ .Values.includeLabelPrefix }}
            {{- end }}
            {{- if .Values.ingressClass }}
            - -ingress-class={{ .Values.ingressClass }}
            {{- end }}
            {{- if .Values.eventWebhook }}
            - -event-webhook={{ .Values.eventWebhook }}
            {{- end }}
            {{- if .Values.kubeconfigQPS }}
            - -kubeconfig-qps={{ .Values.kubeconfigQPS }}
            {{- end }}
            {{- if .Values.kubeconfigBurst }}
            - -kubeconfig-burst={{ .Values.kubeconfigBurst }}
            {{- end }}
            {{- if .Values.controlplane.kubeconfig.secretName }}
            - -kubeconfig-service-mesh=/tmp/controlplane/{{ .Values.controlplane.kubeconfig.key }}
            {{- end }}
            {{- if .Values.threadiness }}
            - -threadiness={{ .Values.threadiness }}
            {{- end }}
            {{- if .Values.clusterName }}
            - -cluster-name={{ .Values.clusterName }}
            {{- end }}
            {{- if .Values.noCrossNamespaceRefs }}
            - -no-cross-namespace-refs={{ .Values.noCrossNamespaceRefs }}
            {{- end }}
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          {{- if .Values.env }}
          env:
{{ toYaml .Values.env | indent 12 }}
          {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
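Note how `leaderElection.enabled` changes both the update strategy (Recreate is dropped) and the flags passed to the binary. A sketch of rendering the HA variant to confirm this:

```console
$ helm template flagger flagger/flagger \
    --set leaderElection.enabled=true \
    --set leaderElection.replicaCount=2 | grep -E 'replicas|leader-election'
```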
@@ -1,16 +0,0 @@
{{- if .Values.podDisruptionBudget.enabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ template "flagger.name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
{{- end }}
@@ -1,28 +0,0 @@
{{- if .Values.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    {{- range $k, $v := .Values.podMonitor.additionalLabels }}
    {{ $k }}: {{ $v | quote }}
    {{- end }}
  name: {{ include "flagger.fullname" . }}
  namespace: {{ .Values.podMonitor.namespace | default .Release.Namespace }}
spec:
  podMetricsEndpoints:
    - interval: {{ .Values.podMonitor.interval }}
      path: /metrics
      port: http
      honorLabels: {{ .Values.podMonitor.honorLabels }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -1,291 +0,0 @@
|
||||
{{- if .Values.prometheus.install }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
- nodes/proxy
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-prometheus
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
data:
|
||||
prometheus.yml: |-
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
scrape_configs:
|
||||
|
||||
# Scrape config for AppMesh Envoy sidecar
|
||||
- job_name: 'appmesh-envoy'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_name]
|
||||
action: keep
|
||||
regex: '^envoy$'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: ${1}:9901
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: kubernetes_namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: kubernetes_pod_name
|
||||
|
||||
# Exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [ cluster_name ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ tcp_prefix ]
|
||||
regex: '(outbound|inbound|prometheus_stats).*'
|
||||
action: drop
|
||||
- source_labels: [ listener_address ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_listener_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ http_conn_manager_prefix ]
|
||||
regex: '(.+)'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tls.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_tcp_downstream.*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_http_(stats|admin).*'
|
||||
action: drop
|
||||
- source_labels: [ __name__ ]
|
||||
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
|
||||
action: drop
|
||||
|
||||
# Scrape config for API servers
|
||||
- job_name: 'kubernetes-apiservers'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- default
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
insecure_skip_verify: true
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: kubernetes;https
|
||||
|
||||
# scrape config for cAdvisor
|
||||
- job_name: 'kubernetes-cadvisor'
|
||||
scheme: https
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
insecure_skip_verify: true
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
relabel_configs:
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_node_label_(.+)
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
|
||||
# exclude high cardinality metrics
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__name__]
|
||||
regex: (container|machine)_(cpu|memory|network|fs)_(.+)
|
||||
action: keep
|
||||
- source_labels: [__name__]
|
||||
regex: container_memory_failures_total
|
||||
action: drop
|
||||
|
||||
# scrape config for pods
|
||||
- job_name: kubernetes-pods
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
      - action: keep
        regex: true
        source_labels:
          - __meta_kubernetes_pod_annotation_prometheus_io_scrape
      - source_labels: [ __address__ ]
        regex: '.*9901.*'
        action: drop
      - action: replace
        regex: (.+)
        source_labels:
          - __meta_kubernetes_pod_annotation_prometheus_io_path
        target_label: __metrics_path__
      - action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        source_labels:
          - __address__
          - __meta_kubernetes_pod_annotation_prometheus_io_port
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - action: replace
        source_labels:
          - __meta_kubernetes_namespace
        target_label: kubernetes_namespace
      - action: replace
        source_labels:
          - __meta_kubernetes_pod_name
        target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "flagger.fullname" . }}-prometheus
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
        app.kubernetes.io/instance: {{ .Release.Name }}
      annotations:
        appmesh.k8s.aws/sidecarInjectorWebhook: disabled
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
      containers:
        - name: prometheus
          image: {{ .Values.prometheus.image }}
          imagePullPolicy: IfNotPresent
          args:
            - '--storage.tsdb.retention={{ .Values.prometheus.retention }}'
            - '--config.file=/etc/prometheus/prometheus.yml'
          ports:
            - containerPort: 9090
              name: http
          livenessProbe:
            httpGet:
              path: /-/healthy
              port: 9090
          readinessProbe:
            httpGet:
              path: /-/ready
              port: 9090
          resources:
            requests:
              cpu: 10m
              memory: 128Mi
          volumeMounts:
            - name: config-volume
              mountPath: /etc/prometheus
            - name: data-volume
              mountPath: /prometheus/data
          {{- if .Values.prometheus.securityContext.enabled }}
          securityContext:
{{ toYaml .Values.prometheus.securityContext.context | indent 12 }}
          {{- end }}
      {{- if .Values.prometheus.pullSecret }}
      imagePullSecrets:
        - name: {{ .Values.prometheus.pullSecret }}
      {{- end }}
      volumes:
        - name: config-volume
          configMap:
            name: {{ template "flagger.fullname" . }}-prometheus
        - name: data-volume
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ template "flagger.fullname" . }}-prometheus
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  selector:
    app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
    app.kubernetes.io/instance: {{ .Release.Name }}
  ports:
    - name: http
      protocol: TCP
      port: 9090
{{- end }}
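The manifests above only render when the bundled Prometheus is requested. A minimal sketch of enabling it at install time (the release name, namespace and resulting `flagger-prometheus` service name are illustrative assumptions derived from the fullname template):

```console
helm upgrade -i flagger flagger/flagger \
  --namespace flagger-system \
  --set prometheus.install=true \
  --set metricsServer=http://flagger-prometheus:9090
```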
@@ -1,67 +0,0 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ template "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: false
  hostIPC: false
  hostNetwork: false
  hostPID: false
  readOnlyRootFilesystem: false
  allowPrivilegeEscalation: false
  allowedCapabilities:
    - '*'
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
    - '*'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "flagger.fullname" . }}-psp
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
  - apiGroups: ['policy']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames:
      - {{ template "flagger.fullname" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ template "flagger.fullname" . }}-psp
  namespace: {{ .Release.Namespace }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "flagger.fullname" . }}-psp
subjects:
  - kind: ServiceAccount
    name: {{ template "flagger.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,297 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
  - apiGroups: [""]
    resources:
      - events
      - configmaps
      - configmaps/finalizers
      - secrets
      - secrets/finalizers
      - services
      - services/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["coordination.k8s.io"]
    resources:
      - leases
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["apps"]
    resources:
      - daemonsets
      - daemonsets/finalizers
      - deployments
      - deployments/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["autoscaling"]
    resources:
      - horizontalpodautoscalers
      - horizontalpodautoscalers/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources:
      - ingresses
      - ingresses/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["flagger.app"]
    resources:
      - canaries
      - canaries/status
      - canaries/finalizers
      - metrictemplates
      - metrictemplates/status
      - alertproviders
      - alertproviders/status
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["networking.istio.io"]
    resources:
      - virtualservices
      - virtualservices/finalizers
      - destinationrules
      - destinationrules/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["appmesh.k8s.aws"]
    resources:
      - virtualnodes
      - virtualnodes/finalizers
      - virtualrouters
      - virtualrouters/finalizers
      - virtualservices
      - virtualservices/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["split.smi-spec.io"]
    resources:
      - trafficsplits
      - trafficsplits/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["specs.smi-spec.io"]
    resources:
      - httproutegroups
      - httproutegroups/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["gloo.solo.io"]
    resources:
      - upstreams
      - upstreams/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["gateway.solo.io"]
    resources:
      - routetables
      - routetables/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["projectcontour.io"]
    resources:
      - httpproxies
      - httpproxies/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["traefik.containo.us"]
    resources:
      - traefikservices
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["kuma.io"]
    resources:
      - trafficroutes
      - trafficroutes/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["gateway.networking.k8s.io"]
    resources:
      - httproutes
      - httproutes/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["keda.sh"]
    resources:
      - scaledobjects
      - scaledobjects/finalizers
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["apisix.apache.org"]
    resources:
      - apisixroutes
    verbs: [get, list, watch, create, update, patch, delete]
  - apiGroups: ["metrics.keptn.sh"]
    resources:
      - keptnmetrics
      - analyses
    verbs: [get, list, watch, create, update, patch, delete]
  - nonResourceURLs:
      - /version
    verbs: [get]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "flagger.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "flagger.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,19 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "flagger.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  ports:
    - name: http
      port: 8080
      targetPort: http
      protocol: TCP
  selector:
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -1,29 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "flagger.name" . }}
  {{- if .Values.serviceMonitor.namespace }}
  namespace: {{ .Values.serviceMonitor.namespace }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    {{- with .Values.serviceMonitor.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  endpoints:
    - path: /metrics
      port: http
      interval: 30s
      scrapeTimeout: 30s
      honorLabels: {{ .Values.serviceMonitor.honorLabels }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
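Rendering the ServiceMonitor requires the Prometheus Operator CRDs in the cluster. A minimal values sketch for enabling it (the `monitoring` namespace and the `release` scrape label are illustrative assumptions):

```yaml
serviceMonitor:
  enabled: true
  namespace: monitoring
  honorLabels: false
  labels:
    release: kube-prometheus-stack
```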
@@ -1,205 +0,0 @@
# Default values for flagger.

## Deployment annotations
# annotations: {}

image:
  repository: ghcr.io/fluxcd/flagger
  tag: 1.38.0
  pullPolicy: IfNotPresent
  pullSecret:

# accepted values are debug, info, warning, error (defaults to info)
logLevel: info

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "8080"
  appmesh.k8s.aws/sidecarInjectorWebhook: disabled
  linkerd.io/inject: enabled

# priority class name for pod priority configuration
podPriorityClassName: ""

metricsServer: "http://prometheus:9090"

# creates a ServiceMonitor for scraping Flagger metrics
serviceMonitor:
  enabled: false
  honorLabels: false
  # Set the namespace the ServiceMonitor should be deployed in
  # namespace: monitoring
  # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
  # labels:

# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik, apisix, osm
meshProvider: ""

# single namespace restriction
namespace: ""

# list of pod labels that Flagger uses to create pod selectors
# defaults to: app,name,app.kubernetes.io/name
selectorLabels: ""

# when enabled, Flagger will track changes in Secrets and ConfigMaps referenced in the target deployment (enabled by default)
configTracking:
  enabled: true

# annotations prefix for NGINX ingresses
ingressAnnotationsPrefix: ""

# ingress class used for annotating HTTPProxy objects
ingressClass: ""

# when enabled, it will add a security context for the flagger pod. You may
# need to disable this if you are running flagger on OpenShift
securityContext:
  enabled: true
  context:
    readOnlyRootFilesystem: true
    runAsUser: 10001

# when specified, flagger will publish events to the provided webhook
eventWebhook: ""

# when specified, flagger will add the cluster name to alerts
clusterName: ""

slack:
  user: flagger
  channel:
  # incoming webhook https://api.slack.com/incoming-webhooks
  url:
  proxy:

msteams:
  # MS Teams incoming webhook URL
  url:

podMonitor:
  enabled: false
  namespace:
  interval: 15s
  additionalLabels: {}
  honorLabels: false

#env:
#- name: SLACK_URL
#  valueFrom:
#    secretKeyRef:
#      name: slack
#      key: url
#- name: SLACK_PROXY_URL
#  valueFrom:
#    secretKeyRef:
#      name: slack
#      key: proxy-url
#- name: MSTEAMS_URL
#  valueFrom:
#    secretKeyRef:
#      name: msteams
#      key: url
#- name: MSTEAMS_PROXY_URL
#  valueFrom:
#    secretKeyRef:
#      name: msteams
#      key: proxy-url
#- name: EVENT_WEBHOOK_URL
#  valueFrom:
#    secretKeyRef:
#      name: eventwebhook
#      key: url
env: []

leaderElection:
  enabled: false
  replicaCount: 1

serviceAccount:
  # serviceAccount.create: whether to create a service account or not
  create: true
  # serviceAccount.name: the name of the service account to create or use
  name: ""
  # serviceAccount.annotations: annotations for the service account
  annotations: {}

rbac:
  # rbac.create: `true` if rbac resources should be created
  create: true
  # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
  pspEnabled: false

crd:
  # crd.create: `true` if custom resource definitions should be created
  create: false

linkerdAuthPolicy:
  # linkerdAuthPolicy.create: whether to create an AuthorizationPolicy in
  # linkerd-viz's namespace to allow flagger to reach viz's prometheus service
  create: false
  # linkerdAuthPolicy.namespace: linkerd-viz's namespace
  namespace: linkerd-viz

nameOverride: ""
fullnameOverride: ""

resources:
  limits:
    memory: "512Mi"
    cpu: "1000m"
  requests:
    memory: "32Mi"
    cpu: "10m"

nodeSelector: {}

tolerations: []

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: '{{ template "flagger.name" . }}'
              app.kubernetes.io/instance: '{{ .Release.Name }}'
          topologyKey: kubernetes.io/hostname

prometheus:
  # to be used with ingress controllers
  install: false
  image: docker.io/prom/prometheus:v2.41.0
  pullSecret:
  retention: 2h
  # when enabled, it will add a security context for the prometheus pod
  securityContext:
    enabled: false
    context:
      readOnlyRootFilesystem: true
      runAsUser: 10001

kubeconfigQPS: ""
kubeconfigBurst: ""

# Multi-cluster service mesh (shared control plane single-network)
controlplane:
  kubeconfig:
    # controlplane.kubeconfig.secretName: the name of the secret containing the mesh control plane kubeconfig
    secretName: ""
    # controlplane.kubeconfig.key: the name of the secret data key that contains the mesh control plane kubeconfig
    key: "kubeconfig"

podDisruptionBudget:
  enabled: false
  minAvailable: 1

podLabels: {}

noCrossNamespaceRefs: false

# Placeholder to supply additional volumes to the flagger pod
additionalVolumes: {}
# - name: tmpfs
#   emptyDir: {}
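As an example of filling in the alerting settings above, a Slack configuration might look like this sketch (the webhook URL is a placeholder):

```yaml
slack:
  user: flagger
  channel: general
  url: https://hooks.slack.com/services/YOUR/WEBHOOK/URL
```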
@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -1,20 +0,0 @@
apiVersion: v1
name: grafana
version: 1.7.0
appVersion: 7.2.0
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
  - https://github.com/fluxcd/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - flagger
  - grafana
  - canary
  - istio
  - appmesh
@@ -1,81 +0,0 @@
# Flagger Grafana

Grafana dashboards for monitoring progressive deployments powered by Flagger and Prometheus.

## Prerequisites

* Kubernetes >= 1.11
* Prometheus >= 2.6

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart for Istio run:

```console
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus:9090
```

To install the chart for AWS App Mesh run:

```console
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=appmesh-system \
  --set url=http://appmesh-prometheus:9090
```

The command deploys Grafana on the Kubernetes cluster in the default namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-grafana` deployment:

```console
helm delete --purge flagger-grafana
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Grafana chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`resources` | Pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `node`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`url` | Prometheus URL | `http://prometheus:9090`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/grafana --name flagger-grafana \
  --set token=WEAVE-CLOUD-TOKEN
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/grafana --name flagger-grafana -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
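Such a values file for a typical install might look like this sketch (the Prometheus URL assumes Flagger's bundled Prometheus running in `istio-system`; adjust it to your setup, and the password is a placeholder):

```yaml
# my-values.yaml
url: http://flagger-prometheus.istio-system:9090
user: admin
password: change-me
```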
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,7 +0,0 @@
1. Run the port forward command:

   kubectl -n {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }} 3000:80

2. Navigate to:

   http://localhost:3000
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,6 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-dashboards
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
@@ -1,32 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-datasources
data:
  datasources.yaml: |-
    apiVersion: 1

    deleteDatasources:
      - name: prometheus
    {{- if .Values.token }}
    datasources:
      - name: prometheus
        type: prometheus
        access: proxy
        url: https://cloud.weave.works/api/prom
        isDefault: true
        editable: true
        version: 1
        basicAuth: true
        basicAuthUser: weave
        basicAuthPassword: {{ .Values.token }}
    {{- else }}
    datasources:
      - name: prometheus
        type: prometheus
        access: proxy
        url: {{ .Values.url }}
        isDefault: true
        editable: true
        version: 1
    {{- end }}
@@ -1,93 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "grafana.fullname" . }}
  labels:
    app: {{ template "grafana.fullname" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ template "grafana.fullname" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "grafana.fullname" . }}
        release: {{ .Release.Name }}
      annotations:
        prometheus.io/scrape: 'false'
        {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          env:
            - name: GF_PATHS_PROVISIONING
              value: /etc/grafana/provisioning/
            {{- if .Values.password }}
            - name: GF_SECURITY_ADMIN_USER
              value: {{ .Values.user }}
            - name: GF_SECURITY_ADMIN_PASSWORD
              value: {{ .Values.password }}
            {{- else }}
            - name: GF_AUTH_BASIC_ENABLED
              value: "false"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
              value: Admin
            {{- end }}
          volumeMounts:
            - name: grafana
              mountPath: /var/lib/grafana
            - name: dashboards
              mountPath: /etc/grafana/dashboards
            - name: datasources
              mountPath: /etc/grafana/provisioning/datasources
            - name: providers
              mountPath: /etc/grafana/provisioning/dashboards
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      volumes:
        - name: grafana
          emptyDir: {}
        - name: dashboards
          configMap:
            name: {{ template "grafana.fullname" . }}-dashboards
        - name: providers
          configMap:
            name: {{ template "grafana.fullname" . }}-providers
        - name: datasources
          configMap:
            name: {{ template "grafana.fullname" . }}-datasources
@@ -1,17 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-providers
data:
  providers.yaml: |+
    apiVersion: 1

    providers:
      - name: 'default'
        orgId: 1
        folder: ''
        type: file
        disableDeletion: false
        editable: true
        options:
          path: /etc/grafana/dashboards
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "grafana.fullname" . }}
  labels:
    app: {{ template "grafana.name" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "grafana.fullname" . }}
    release: {{ .Release.Name }}
@@ -1,39 +0,0 @@
# Default values for grafana.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: grafana/grafana
  tag: 7.3.4
  pullPolicy: IfNotPresent

podAnnotations: {}

service:
  type: ClusterIP
  port: 80

resources: {}
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

user: admin
password:

# Prometheus instance
url: http://prometheus:9090

# Weave Cloud instance token
token:
@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,25 +0,0 @@
apiVersion: v1
name: loadtester
version: 0.33.0
appVersion: 0.33.0
kubeVersion: ">=1.19.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey and bojand/ghz, which generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
sources:
  - https://github.com/fluxcd/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - flagger
  - istio
  - appmesh
  - linkerd
  - gloo
  - osm
  - smi
  - gitops
  - load testing
@@ -1,92 +0,0 @@
# Flagger load testing service

[Flagger's](https://github.com/fluxcd/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey) and
[bojand/ghz](https://github.com/bojand/ghz).
It can be used to generate HTTP and gRPC traffic during canary analysis when configured as a webhook.

## Prerequisites

* Kubernetes >= 1.19

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-loadtester`:

```console
helm upgrade -i flagger-loadtester flagger/loadtester
```

The command deploys the load tester on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should
> have Istio, App Mesh, Linkerd or Open Service Mesh sidecar injection enabled

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-loadtester` deployment:

```console
helm delete flagger-loadtester
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the load tester chart and their default values.

| Parameter | Description | Default |
|------------------------------------|--------------------------------------------------------------------------------------|-------------------------------------|
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger-loadtester` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.tag` | Image tag | `<VERSION>` |
| `replicaCount` | Desired number of pods | `1` |
| `serviceAccountName` | Kubernetes service account name | `none` |
| `resources.requests.cpu` | CPU requests | `10m` |
| `resources.requests.memory` | Memory requests | `64Mi` |
| `tolerations` | List of node taints to tolerate | `[]` |
| `affinity` | Node/pod affinities | `node` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `service.type` | Type of service | `ClusterIP` |
| `service.port` | ClusterIP port | `80` |
| `cmd.timeout` | Command execution timeout | `1h` |
| `cmd.namespaceRegexp` | Restrict access to canaries in matching namespaces | `""` |
| `logLevel` | Log level can be debug, info, warning, error or panic | `info` |
| `appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false` |
| `appmesh.backends` | AWS App Mesh virtual services | `none` |
| `istio.enabled` | Create Istio virtual service | `false` |
| `istio.host` | Loadtester hostname | `flagger-loadtester.flagger` |
| `istio.gateway.enabled` | Create Istio gateway in namespace | `false` |
| `istio.tls.enabled` | Enable TLS in gateway (TLS secrets should be in the namespace) | `false` |
| `istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false` |
| `podPriorityClassName` | PriorityClass name for pod priority configuration | `""` |
| `securityContext.enabled` | Add securityContext to container | `false` |
| `securityContext.context` | securityContext to add | `""` |
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,

```console
helm upgrade -i flagger-loadtester flagger/loadtester \
  --set "appmesh.enabled=true" \
  --set "appmesh.backends[0]=podinfo" \
  --set "appmesh.backends[1]=podinfo-canary"
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
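Once deployed, the service is typically referenced from a canary analysis as a rollout webhook; a minimal sketch (the `test` namespace and `podinfo` target are illustrative):

```yaml
webhooks:
  - name: load-test
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
```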
@@ -1 +0,0 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,27 +0,0 @@
{{- if .Values.appmesh.enabled }}
apiVersion: appmesh.k8s.aws/v1beta2
kind: VirtualNode
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  podSelector:
    matchLabels:
      app: {{ include "loadtester.name" . }}
  logging:
    accessLog:
      file:
        path: /dev/stdout
  {{- if .Values.appmesh.backends }}
  backends:
    {{- range .Values.appmesh.backends }}
    - virtualService:
        virtualServiceRef:
          name: {{ . }}
    {{- end }}
  {{- end }}
{{- end }}
@@ -1,104 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "loadtester.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "loadtester.name" . }}
        app.kubernetes.io/name: {{ include "loadtester.name" . }}
        {{- range $key, $value := .Values.podLabels }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
      annotations:
        appmesh.k8s.aws/ports: "444"
        openservicemesh.io/inbound-port-exclusion-list: "80, 8080"
        {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      {{- if .Values.serviceAccountName }}
      serviceAccountName: {{ .Values.serviceAccountName }}
      {{- else if .Values.rbac.create }}
      serviceAccountName: {{ include "loadtester.fullname" . }}
      {{- end }}
      {{- if .Values.podPriorityClassName }}
      priorityClassName: {{ .Values.podPriorityClassName }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          {{- if .Values.securityContext.enabled }}
          securityContext:
{{ toYaml .Values.securityContext.context | indent 12 }}
          {{- end }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./loadtester
            - -port=8080
            - -log-level={{ .Values.logLevel }}
            - -timeout={{ .Values.cmd.timeout }}
            - -namespace-regexp={{ .Values.cmd.namespaceRegexp }}
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          {{- if .Values.env }}
          env:
            {{- toYaml .Values.env | nindent 12 }}
          {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- if .Values.image.pullSecret }}
      imagePullSecrets:
        - name: {{ .Values.image.pullSecret }}
      {{- end }}
      {{- with .Values.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,30 +0,0 @@
{{- if and (.Values.istio.enabled) (.Values.istio.gateway.enabled) }}
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: {{ include "loadtester.fullname" . }}
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http-default
        protocol: HTTP
      hosts:
        - {{ .Values.istio.host }}
    {{- if .Values.istio.tls.enabled }}
    - port:
        number: 443
        name: https-default
        protocol: HTTPS
      tls:
        httpsRedirect: {{ .Values.istio.tls.httpsRedirect }}
        mode: SIMPLE
        serverCertificate: "sds"
        privateKey: "sds"
        credentialName: {{ include "loadtester.fullname" . }}
      hosts:
        - {{ .Values.istio.host }}
    {{- end }}
{{- end }}
@@ -1,17 +0,0 @@
{{- if .Values.istio.enabled }}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: {{ include "loadtester.fullname" . }}
spec:
  gateways:
    - {{ include "loadtester.fullname" . }}
  hosts:
    - {{ .Values.istio.host }}
  http:
    - route:
        - destination:
            host: {{ include "loadtester.fullname" . }}
            port:
              number: {{ .Values.service.port }}
{{- end }}
@@ -1,15 +0,0 @@
{{- if .Values.podDisruptionBudget.enabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
  name: {{ include "loadtester.fullname" . }}
spec:
  minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "loadtester.name" . }}
{{- end }}
@@ -1,57 +0,0 @@
---
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq .Values.rbac.scope "cluster" }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
{{ toYaml .Values.rbac.rules | indent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if eq .Values.rbac.scope "cluster" }}
kind: ClusterRoleBinding
{{- else }}
kind: RoleBinding
{{- end }}
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  {{- if eq .Values.rbac.scope "cluster" }}
  kind: ClusterRole
  {{- else }}
  kind: Role
  {{- end }}
  name: {{ template "loadtester.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "loadtester.fullname" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "loadtester.fullname" . }}
  labels:
    helm.sh/chart: {{ template "loadtester.chart" . }}
    app.kubernetes.io/name: {{ template "loadtester.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  {{- if .Values.rbac.serviceAccountAnnotations }}
  annotations: {{ tpl (toYaml .Values.rbac.serviceAccountAnnotations) . | nindent 4 }}
  {{- end }}
{{- end }}
@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ include "loadtester.name" . }}
@@ -1,27 +0,0 @@
{{- if .Values.meshName }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  meshName: {{ .Values.meshName }}
  listeners:
    - portMapping:
        port: 444
        protocol: http
  serviceDiscovery:
    dns:
      hostName: {{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}
  {{- if .Values.backends }}
  backends:
    {{- range .Values.backends }}
    - virtualService:
        virtualServiceName: {{ . }}
    {{- end }}
  {{- end }}
{{- end }}
@@ -1,96 +0,0 @@
replicaCount: 1

image:
  repository: ghcr.io/fluxcd/flagger-loadtester
  tag: 0.33.0
  pullPolicy: IfNotPresent
  pullSecret:

podLabels: {}

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "8080"

podPriorityClassName: ""

logLevel: info
cmd:
  timeout: 1h
  namespaceRegexp: ""

nameOverride: ""
fullnameOverride: ""

env: []

service:
  type: ClusterIP
  port: 80

resources:
  requests:
    cpu: 10m
    memory: 64Mi

volumes: []
volumeMounts: []

nodeSelector: {}

tolerations: []

affinity: {}

rbac:
  # rbac.create: `true` if rbac resources should be created
  create: false
  # rbac.scope: `cluster` to create cluster-scoped rbac resources (ClusterRole/ClusterRoleBinding)
  # otherwise, namespace-scoped rbac resources will be created (Role/RoleBinding)
  scope:
  # rbac.rules: array of rules to apply to the role. example:
  # rules:
  #   - apiGroups: [""]
  #     resources: ["pods"]
  #     verbs: ["list", "get"]
  rules: []
  # annotations to add to the service account
  serviceAccountAnnotations: {}

# name of an existing service account to use - if not creating rbac resources
serviceAccountName: ""

# App Mesh virtual node settings (to be used for App Mesh v1beta1)
meshName: ""
#backends:
#  - app1.namespace
#  - app2.namespace

# App Mesh virtual node settings (to be used for App Mesh v1beta2)
appmesh:
  enabled: false
  backends:
    - podinfo
    - podinfo-canary

# Istio virtual service and gateway settings. TLS secrets should exist in the
# namespace before enabling TLS (secret name format: loadtester.fullname).
istio:
  enabled: false
  host: flagger-loadtester.flagger
  gateway:
    enabled: false
  tls:
    enabled: false
    httpsRedirect: false

# when enabled, it will add a security context for the loadtester pod
securityContext:
  enabled: false
  context:
    readOnlyRootFilesystem: true
    runAsUser: 100
    runAsGroup: 101

podDisruptionBudget:
  enabled: false
  minAvailable: 1
@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -1,14 +0,0 @@
apiVersion: v1
version: 6.1.3
appVersion: 6.1.3
name: podinfo
engine: gotpl
description: Flagger canary deployment demo application
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
sources:
  - https://github.com/stefanprodan/podinfo
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
@@ -1,79 +0,0 @@
# Podinfo

Podinfo is a tiny web application made with Go
that showcases best practices of running canary deployments with Flagger and Istio.

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `frontend`:

```console
helm upgrade -i frontend flagger/podinfo \
  --namespace test \
  --set nameOverride=frontend \
  --set backend=http://backend.test:9898/echo \
  --set canary.enabled=true \
  --set canary.istioIngress.enabled=true \
  --set canary.istioIngress.gateway=istio-system/public-gateway \
  --set canary.istioIngress.host=frontend.istio.example.com
```

To install the chart as `backend`:

```console
helm upgrade -i backend flagger/podinfo \
  --namespace test \
  --set nameOverride=backend \
  --set canary.enabled=true
```

## Uninstalling the Chart

To uninstall/delete the `frontend` deployment:

```console
$ helm delete --purge frontend
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the podinfo chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/podinfo`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`hpa.enabled` | enables HPA | `true`
`hpa.cpu` | target CPU usage per pod | `80`
`hpa.memory` | target memory usage per pod | `512Mi`
`hpa.minReplicas` | minimum pod replicas | `2`
`hpa.maxReplicas` | maximum pod replicas | `4`
`resources.requests/cpu` | pod CPU request | `1m`
`resources.requests/memory` | pod memory request | `16Mi`
`backend` | backend URL | None
`faults.delay` | random HTTP response delays between 0 and 5 seconds | `false`
`faults.error` | 1/3 chance of a random HTTP response error | `false`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
$ helm install flagger/podinfo --name frontend \
  --set=image.tag=1.4.1,hpa.enabled=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install flagger/podinfo --name frontend -f values.yaml
```
@@ -1 +0,0 @@
|
||||
podinfo {{ .Release.Name }} deployed!
|
||||
@@ -1,43 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "podinfo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "podinfo.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "podinfo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name suffix.
*/}}
{{- define "podinfo.suffix" -}}
{{- if .Values.canary.enabled -}}
{{- "-primary" -}}
{{- else -}}
{{- "" -}}
{{- end -}}
{{- end -}}
@@ -1,60 +0,0 @@
{{- if .Values.canary.enabled }}
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  autoscalerRef:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: {{ template "podinfo.fullname" . }}
  service:
    port: {{ .Values.service.port }}
    {{- if .Values.canary.istioIngress.enabled }}
    gateways:
      - {{ .Values.canary.istioIngress.gateway }}
    hosts:
      - {{ .Values.canary.istioIngress.host }}
    {{- end }}
    trafficPolicy:
      tls:
        mode: {{ .Values.canary.istioTLS }}
  analysis:
    interval: {{ .Values.canary.analysis.interval }}
    threshold: {{ .Values.canary.analysis.threshold }}
    maxWeight: {{ .Values.canary.analysis.maxWeight }}
    stepWeight: {{ .Values.canary.analysis.stepWeight }}
    metrics:
      - name: request-success-rate
        threshold: {{ .Values.canary.thresholds.successRate }}
        interval: 1m
      - name: request-duration
        threshold: {{ .Values.canary.thresholds.latency }}
        interval: 1m
    webhooks:
      {{- if .Values.canary.helmtest.enabled }}
      - name: "helm test"
        type: pre-rollout
        url: {{ .Values.canary.helmtest.url }}
        timeout: 3m
        metadata:
          type: "helmv3"
          cmd: "test {{ .Release.Name }} -n {{ .Release.Namespace }}"
      {{- end }}
      {{- if .Values.canary.loadtest.enabled }}
      - name: load-test-get
        url: {{ .Values.canary.loadtest.url }}
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 5 -c 2 http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}"
      {{- end }}
{{- end }}
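The template above expects a `canary` block in the chart values; a minimal sketch of the settings it references (the thresholds and the load tester URL are illustrative):

```yaml
canary:
  enabled: true
  istioTLS: DISABLE
  istioIngress:
    enabled: false
  analysis:
    interval: 1m
    threshold: 10
    maxWeight: 50
    stepWeight: 5
  thresholds:
    successRate: 99
    latency: 500
  helmtest:
    enabled: false
  loadtest:
    enabled: true
    url: http://flagger-loadtester.test/
```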
@@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
  config.yaml: |-
    # http settings
    http-client-timeout: 1m
    http-server-timeout: {{ .Values.httpServer.timeout }}
    http-server-shutdown-timeout: 5s
@@ -1,101 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      app: {{ template "podinfo.fullname" . }}
  template:
    metadata:
      labels:
        app: {{ template "podinfo.fullname" . }}
      annotations:
        prometheus.io/scrape: 'true'
        {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      terminationGracePeriodSeconds: 30
      containers:
        - name: podinfo
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - ./podinfo
            - --port={{ .Values.service.port }}
            - --level={{ .Values.logLevel }}
            - --random-delay={{ .Values.faults.delay }}
            - --random-error={{ .Values.faults.error }}
            - --config-path=/podinfo/config
            {{- range .Values.backends }}
            - --backend-url={{ . }}
            {{- end }}
          env:
            - name: PODINFO_UI_COLOR
              value: "#34577c"
            {{- if .Values.message }}
            - name: PODINFO_UI_MESSAGE
              value: {{ .Values.message }}
            {{- end }}
            {{- if .Values.backend }}
            - name: PODINFO_BACKEND_URL
              value: {{ .Values.backend }}
            {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:{{ .Values.service.port }}/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          volumeMounts:
            - name: data
              mountPath: /data
            - name: config
              mountPath: /podinfo/config
              readOnly: true
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      volumes:
        - name: data
          emptyDir: {}
        - name: config
          configMap:
            name: {{ template "podinfo.fullname" . }}
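
Both probes above are exec probes: the kubelet runs podcli inside the container and treats a non-zero exit code as a failed check. A rough Go sketch of that contract (podcli's actual flags and behavior are assumptions here, not taken from its source):

package main

import (
    "fmt"
    "net/http"
    "os"
    "time"
)

func main() {
    if len(os.Args) < 2 {
        fmt.Fprintln(os.Stderr, "usage: check <host:port/path>")
        os.Exit(2)
    }
    client := &http.Client{Timeout: 5 * time.Second} // matches the probe's timeoutSeconds
    resp, err := client.Get("http://" + os.Args[1])
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1) // non-zero exit marks the probe as failed
    }
    defer resp.Body.Close()
    if resp.StatusCode < 200 || resp.StatusCode > 299 {
        fmt.Fprintf(os.Stderr, "unhealthy status: %d\n", resp.StatusCode)
        os.Exit(1)
    }
    fmt.Println("OK")
}
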
@@ -1,31 +0,0 @@
{{- if .Values.hpa.enabled -}}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "podinfo.fullname" . }}
  minReplicas: {{ .Values.hpa.minReplicas }}
  maxReplicas: {{ .Values.hpa.maxReplicas }}
  metrics:
    {{- if .Values.hpa.cpu }}
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: {{ .Values.hpa.cpu }}
    {{- end }}
    {{- if .Values.hpa.memory }}
    - type: Resource
      resource:
        name: memory
        target:
          type: AverageValue
          averageValue: {{ .Values.hpa.memory }}
    {{- end }}
{{- end }}
@@ -1,20 +0,0 @@
{{- if and .Values.service.enabled (not .Values.canary.enabled) }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "podinfo.fullname" . }}
  labels:
    app: {{ template "podinfo.name" . }}
    chart: {{ template "podinfo.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "podinfo.fullname" . }}
{{- end }}
@@ -1,30 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "podinfo.fullname" . }}-jwt-test-{{ randAlphaNum 5 | lower }}
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
  annotations:
    "helm.sh/hook": test-success
    sidecar.istio.io/inject: "false"
    linkerd.io/inject: disabled
    appmesh.k8s.aws/sidecarInjectorWebhook: disabled
    openservicemesh.io/sidecar-injection: disabled
spec:
  containers:
    - name: tools
      image: giantswarm/tiny-tools
      command:
        - sh
        - -c
        - |
          TOKEN=$(curl -sd 'test' ${PODINFO_SVC}/token | jq -r .token) &&
          curl -H "Authorization: Bearer ${TOKEN}" ${PODINFO_SVC}/token/validate | grep test
      env:
        - name: PODINFO_SVC
          value: {{ template "podinfo.fullname" . }}:{{ .Values.service.port }}
  restartPolicy: Never
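
The helm test pod's shell pipeline obtains a JWT from podinfo's /token endpoint and validates it via /token/validate. The same round trip as a standalone Go sketch (the service address is a placeholder for the PODINFO_SVC value injected above):

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "log"
    "net/http"
    "strings"
)

func main() {
    svc := "http://podinfo:9898" // placeholder for ${PODINFO_SVC}

    // POST a payload to /token to obtain a JWT, as `curl -sd 'test'` does.
    resp, err := http.Post(svc+"/token", "text/plain", strings.NewReader("test"))
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    var tokenResp struct {
        Token string `json:"token"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
        log.Fatal(err)
    }

    // Validate the token, mirroring the Authorization: Bearer header in the script.
    req, _ := http.NewRequest(http.MethodGet, svc+"/token/validate", nil)
    req.Header.Set("Authorization", "Bearer "+tokenResp.Token)
    validateResp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer validateResp.Body.Close()

    body, _ := io.ReadAll(validateResp.Body)
    if !strings.Contains(string(body), "test") {
        log.Fatalf("token validation failed: %s", body)
    }
    fmt.Println("token validated")
}
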
@@ -1,83 +0,0 @@
# Default values for podinfo.
image:
  repository: ghcr.io/stefanprodan/podinfo
  tag: 6.1.3
  pullPolicy: IfNotPresent

podAnnotations: {}

service:
  enabled: false
  type: ClusterIP
  port: 9898

hpa:
  enabled: true
  minReplicas: 2
  maxReplicas: 4
  cpu: 80
  memory: 512Mi

canary:
  enabled: false
  # Istio traffic policy tls can be DISABLE or ISTIO_MUTUAL
  istioTLS: DISABLE
  istioIngress:
    enabled: false
    # Istio ingress gateway name
    gateway: istio-system/public-gateway
    # external host name e.g. podinfo.example.com
    host:
  analysis:
    # schedule interval (default 60s)
    interval: 15s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
  thresholds:
    # minimum req success rate (non 5xx responses)
    # percentage (0-100)
    successRate: 99
    # maximum req duration P99
    # milliseconds
    latency: 500
  loadtest:
    enabled: false
    # load tester address
    url: http://flagger-loadtester.test/
  helmtest:
    enabled: false
    # helm tester address
    url: http://flagger-helmtester.kube-system/

resources:
  limits:
  requests:
    cpu: 100m
    memory: 32Mi

nodeSelector: {}

tolerations: []

affinity: {}

nameOverride: ""
fullnameOverride: ""

logLevel: info
backend: #http://backend-podinfo:9898/echo
backends: []
message: #UI greetings

faults:
  delay: false
  error: false

httpServer:
  timeout: 30s
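
The analysis values above determine the pace of a rollout: traffic moves in stepWeight increments up to maxWeight, one step per interval, and the canary is rolled back once threshold metric checks have failed. A back-of-the-envelope sketch of that arithmetic (Flagger's actual state machine adds confirmation and promotion iterations on top):

package main

import (
    "fmt"
    "time"
)

func main() {
    // Chart defaults from the values above.
    interval := 15 * time.Second // canary.analysis.interval
    stepWeight := 5              // canary.analysis.stepWeight
    maxWeight := 50              // canary.analysis.maxWeight
    threshold := 10              // failed checks tolerated before rollback

    // A healthy canary needs maxWeight/stepWeight successful checks,
    // one per interval, before promotion can begin.
    steps := maxWeight / stepWeight
    fmt.Printf("traffic steps: %d, minimum time at full analysis: %s\n",
        steps, time.Duration(steps)*interval)

    // An unhealthy canary is rolled back after `threshold` failed checks.
    fmt.Printf("worst-case time to rollback: %s\n", time.Duration(threshold)*interval)
}
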
@@ -1,442 +0,0 @@
/*
Copyright 2020 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "context"
    "flag"
    "fmt"
    "log"
    "os"
    "strings"
    "time"

    "github.com/Masterminds/semver/v3"
    "github.com/go-logr/zapr"
    "go.uber.org/zap"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/client-go/transport"
    _ "k8s.io/code-generator/cmd/client-gen/generators"
    "k8s.io/klog/v2"

    "github.com/fluxcd/flagger/pkg/canary"
    clientset "github.com/fluxcd/flagger/pkg/client/clientset/versioned"
    informers "github.com/fluxcd/flagger/pkg/client/informers/externalversions"
    "github.com/fluxcd/flagger/pkg/controller"
    "github.com/fluxcd/flagger/pkg/logger"
    "github.com/fluxcd/flagger/pkg/metrics/observers"
    "github.com/fluxcd/flagger/pkg/notifier"
    "github.com/fluxcd/flagger/pkg/router"
    "github.com/fluxcd/flagger/pkg/server"
    "github.com/fluxcd/flagger/pkg/signals"
    "github.com/fluxcd/flagger/pkg/version"
)

var (
    masterURL                string
    kubeconfig               string
    kubeconfigQPS            int
    kubeconfigBurst          int
    metricsServer            string
    controlLoopInterval      time.Duration
    logLevel                 string
    port                     string
    msteamsURL               string
    msteamsProxyURL          string
    includeLabelPrefix       string
    slackURL                 string
    slackToken               string
    slackProxyURL            string
    slackUser                string
    slackChannel             string
    eventWebhook             string
    threadiness              int
    zapReplaceGlobals        bool
    zapEncoding              string
    namespace                string
    meshProvider             string
    selectorLabels           string
    ingressAnnotationsPrefix string
    ingressClass             string
    enableLeaderElection     bool
    leaderElectionNamespace  string
    enableConfigTracking     bool
    ver                      bool
    kubeconfigServiceMesh    string
    clusterName              string
    noCrossNamespaceRefs     bool
)

func init() {
    flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
    flag.IntVar(&kubeconfigQPS, "kubeconfig-qps", 100, "Set QPS for kubeconfig.")
    flag.IntVar(&kubeconfigBurst, "kubeconfig-burst", 250, "Set Burst for kubeconfig.")
    flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
    flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
    flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
    flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
    flag.StringVar(&port, "port", "8080", "Port to listen on.")
    flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
    flag.StringVar(&slackToken, "slack-token", "", "Slack bot token.")
    flag.StringVar(&slackProxyURL, "slack-proxy-url", "", "Slack proxy URL.")
    flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
    flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
    flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing Flagger events.")
    flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
    flag.StringVar(&msteamsProxyURL, "msteams-proxy-url", "", "MS Teams proxy URL.")
    flag.StringVar(&includeLabelPrefix, "include-label-prefix", "", "List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all.")
    flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
    flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
    flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
    flag.StringVar(&namespace, "namespace", "", "Namespace in which Flagger watches for canary objects.")
    flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper, traefik, apisix, osm or kuma.")
    flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
    flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for NGINX ingresses.")
    flag.StringVar(&ingressClass, "ingress-class", "", "Ingress class used for annotating HTTPProxy objects.")
    flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
    flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
    flag.BoolVar(&enableConfigTracking, "enable-config-tracking", true, "Enable secrets and configmaps tracking.")
    flag.BoolVar(&ver, "version", false, "Print version.")
    flag.StringVar(&kubeconfigServiceMesh, "kubeconfig-service-mesh", "", "Path to a kubeconfig for the service mesh control plane cluster.")
    flag.StringVar(&clusterName, "cluster-name", "", "Cluster name to be included in alert messages.")
    flag.BoolVar(&noCrossNamespaceRefs, "no-cross-namespace-refs", false, "When set to true, Flagger can only refer to resources in the same namespace.")
}

func main() {
    klog.InitFlags(nil)
    flag.Parse()

    if ver {
        fmt.Println("Flagger version", version.VERSION, "revision", version.REVISION)
        os.Exit(0)
    }

    logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
    if err != nil {
        log.Fatalf("Error creating logger: %v", err)
    }
    if zapReplaceGlobals {
        zap.ReplaceGlobals(logger.Desugar())
    }

    klog.SetLogger(zapr.NewLogger(logger.Desugar()))

    defer logger.Sync()

    stopCh := signals.SetupSignalHandler()

    logger.Infof("Starting flagger version %s revision %s mesh provider %s", version.VERSION, version.REVISION, meshProvider)

    cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
    if err != nil {
        logger.Fatalf("Error building kubeconfig: %v", err)
    }

    cfg.QPS = float32(kubeconfigQPS)
    cfg.Burst = kubeconfigBurst

    kubeClient, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        logger.Fatalf("Error building kubernetes clientset: %v", err)
    }

    flaggerClient, err := clientset.NewForConfig(cfg)
    if err != nil {
        logger.Fatalf("Error building flagger clientset: %s", err.Error())
    }

    // use a remote cluster for routing if a service mesh kubeconfig is specified
    if kubeconfigServiceMesh == "" {
        kubeconfigServiceMesh = kubeconfig
    }
    serviceMeshCfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigServiceMesh)
    if err != nil {
        logger.Fatalf("Error building host kubeconfig: %v", err)
    }

    serviceMeshCfg.QPS = float32(kubeconfigQPS)
    serviceMeshCfg.Burst = kubeconfigBurst

    meshClient, err := clientset.NewForConfig(serviceMeshCfg)
    if err != nil {
        logger.Fatalf("Error building mesh clientset: %v", err)
    }

    verifyCRDs(flaggerClient, logger)
    verifyKubernetesVersion(kubeClient, logger)
    infos := startInformers(flaggerClient, logger, stopCh)

    labels := strings.Split(selectorLabels, ",")
    if len(labels) < 1 {
        logger.Fatalf("At least one selector label is required")
    }

    if namespace != "" {
        logger.Infof("Watching namespace %s", namespace)
    }

    observerFactory, err := observers.NewFactory(metricsServer)
    if err != nil {
        logger.Fatalf("Error building prometheus client: %s", err.Error())
    }

    ok, err := observerFactory.Client.IsOnline()
    if ok {
        logger.Infof("Connected to metrics server %s", metricsServer)
    } else {
        logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
    }

    // setup Slack or MS Teams notifications
    notifierClient := initNotifier(logger)

    // start HTTP server
    go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

    setOwnerRefs := true
    // Routers shouldn't set OwnerRefs on the resources they create when the
    // service mesh/ingress controller runs in a different cluster.
    if cfg.Host != serviceMeshCfg.Host {
        setOwnerRefs = false
    }

    routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, ingressClass, logger, meshClient, setOwnerRefs)

    var configTracker canary.Tracker
    if enableConfigTracking {
        configTracker = &canary.ConfigTracker{
            Logger:        logger,
            KubeClient:    kubeClient,
            FlaggerClient: flaggerClient,
        }
    } else {
        configTracker = &canary.NopTracker{}
    }

    includeLabelPrefixArray := strings.Split(includeLabelPrefix, ",")

    canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, includeLabelPrefixArray, logger)

    c := controller.NewController(
        kubeClient,
        flaggerClient,
        infos,
        controlLoopInterval,
        logger,
        notifierClient,
        canaryFactory,
        routerFactory,
        observerFactory,
        meshProvider,
        version.VERSION,
        fromEnv("EVENT_WEBHOOK_URL", eventWebhook),
        clusterName,
        noCrossNamespaceRefs,
        cfg,
    )

    // leader election context
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // prevents new requests when leadership is lost
    cfg.Wrap(transport.ContextCanceller(ctx, fmt.Errorf("the leader is shutting down")))

    // cancel leader election context on shutdown signals
    go func() {
        <-stopCh
        cancel()
    }()

    // wrap controller run
    runController := func() {
        if err := c.Run(threadiness, stopCh); err != nil {
            logger.Fatalf("Error running controller: %v", err)
        }
    }

    // run controller when this instance wins the leader election
    if enableLeaderElection {
        ns := leaderElectionNamespace
        if namespace != "" {
            ns = namespace
        }
        startLeaderElection(ctx, runController, ns, kubeClient, logger)
    } else {
        runController()
    }
}

func startInformers(flaggerClient clientset.Interface, logger *zap.SugaredLogger, stopCh <-chan struct{}) controller.Informers {
    flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))

    logger.Info("Waiting for canary informer cache to sync")
    canaryInformer := flaggerInformerFactory.Flagger().V1beta1().Canaries()
    go canaryInformer.Informer().Run(stopCh)
    if ok := cache.WaitForNamedCacheSync("flagger", stopCh, canaryInformer.Informer().HasSynced); !ok {
        logger.Fatalf("failed to wait for cache to sync")
    }

    logger.Info("Waiting for metric template informer cache to sync")
    metricInformer := flaggerInformerFactory.Flagger().V1beta1().MetricTemplates()
    go metricInformer.Informer().Run(stopCh)
    if ok := cache.WaitForNamedCacheSync("flagger", stopCh, metricInformer.Informer().HasSynced); !ok {
        logger.Fatalf("failed to wait for cache to sync")
    }

    logger.Info("Waiting for alert provider informer cache to sync")
    alertInformer := flaggerInformerFactory.Flagger().V1beta1().AlertProviders()
    go alertInformer.Informer().Run(stopCh)
    if ok := cache.WaitForNamedCacheSync("flagger", stopCh, alertInformer.Informer().HasSynced); !ok {
        logger.Fatalf("failed to wait for cache to sync")
    }

    return controller.Informers{
        CanaryInformer: canaryInformer,
        MetricInformer: metricInformer,
        AlertInformer:  alertInformer,
    }
}

func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient kubernetes.Interface, logger *zap.SugaredLogger) {
    configMapName := "flagger-leader-election"
    id, err := os.Hostname()
    if err != nil {
        logger.Fatalf("Error running controller: %v", err)
    }
    id = id + "_" + string(uuid.NewUUID())

    lock, err := resourcelock.New(
        resourcelock.LeasesResourceLock,
        ns,
        configMapName,
        kubeClient.CoreV1(),
        kubeClient.CoordinationV1(),
        resourcelock.ResourceLockConfig{
            Identity: id,
        },
    )
    if err != nil {
        logger.Fatalf("Error running controller: %v", err)
    }

    logger.Infof("Starting leader election id: %s configmap: %s namespace: %s", id, configMapName, ns)
    leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
        Lock:            lock,
        ReleaseOnCancel: true,
        LeaseDuration:   60 * time.Second,
        RenewDeadline:   15 * time.Second,
        RetryPeriod:     5 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                logger.Info("Acting as elected leader")
                run()
            },
            OnStoppedLeading: func() {
                logger.Infof("Leadership lost")
                os.Exit(1)
            },
            OnNewLeader: func(identity string) {
                if identity != id {
                    logger.Infof("Another instance has been elected as leader: %v", identity)
                }
            },
        },
    })
}

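The timings passed to RunOrDie above are not arbitrary: client-go requires the lease duration to exceed the renew deadline, and the renew deadline to exceed the retry period by a jitter margin. A small sketch that checks those orderings (the exact 1.2 jitter factor is an assumption to verify against the client-go release in use):

package main

import (
    "fmt"
    "time"
)

// checkLeaderElectionTimings applies the ordering that client-go enforces on
// these settings: the lease must outlive the renew deadline, which in turn
// must exceed the retry period by a jitter margin.
func checkLeaderElectionTimings(lease, renew, retry time.Duration) error {
    if lease <= renew {
        return fmt.Errorf("LeaseDuration (%s) must be greater than RenewDeadline (%s)", lease, renew)
    }
    if float64(renew) <= 1.2*float64(retry) {
        return fmt.Errorf("RenewDeadline (%s) must be greater than 1.2*RetryPeriod (%s)", renew, retry)
    }
    return nil
}

func main() {
    // The values used above (60s / 15s / 5s) satisfy both constraints.
    fmt.Println(checkLeaderElectionTimings(60*time.Second, 15*time.Second, 5*time.Second))
}
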
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
    provider := "slack"
    token := fromEnv("SLACK_TOKEN", slackToken)
    notifierURL := fromEnv("SLACK_URL", slackURL)
    notifierProxyURL := fromEnv("SLACK_PROXY_URL", slackProxyURL)
    if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
        provider = "msteams"
        notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
        notifierProxyURL = fromEnv("MSTEAMS_PROXY_URL", msteamsProxyURL)
    }
    notifierFactory := notifier.NewFactory(notifierURL, token, notifierProxyURL, slackUser, slackChannel)

    var err error
    client, err = notifierFactory.Notifier(provider)
    if err != nil {
        logger.Errorf("Notifier %v", err)
    } else if len(notifierURL) > 30 {
        logger.Infof("Notifications enabled for %s", notifierURL[0:30])
    }
    return
}

func fromEnv(envVar string, defaultVal string) string {
    if v := os.Getenv(envVar); v != "" {
        return v
    }
    return defaultVal
}

func verifyCRDs(flaggerClient clientset.Interface, logger *zap.SugaredLogger) {
    _, err := flaggerClient.FlaggerV1beta1().Canaries(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1})
    if err != nil {
        logger.Fatalf("Canary CRD is not registered %v", err)
    }

    _, err = flaggerClient.FlaggerV1beta1().MetricTemplates(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1})
    if err != nil {
        logger.Fatalf("MetricTemplate CRD is not registered %v", err)
    }

    _, err = flaggerClient.FlaggerV1beta1().AlertProviders(namespace).List(context.TODO(), metav1.ListOptions{Limit: 1})
    if err != nil {
        logger.Fatalf("AlertProvider CRD is not registered %v", err)
    }
}

func verifyKubernetesVersion(kubeClient kubernetes.Interface, logger *zap.SugaredLogger) {
    ver, err := kubeClient.Discovery().ServerVersion()
    if err != nil {
        logger.Fatalf("Error calling Kubernetes API: %v", err)
    }

    k8sVersionConstraint := "^1.11.0"

    // We append -alpha.1 to the end of our version constraint so that prebuilds of later versions
    // are considered valid for our purposes, as well as some managed solutions like EKS where they provide
    // a version like `v1.12.6-eks-d69f1b`. It doesn't matter what the prerelease value is here, just that it
    // exists in our constraint.
    semverConstraint, err := semver.NewConstraint(k8sVersionConstraint + "-alpha.1")
    if err != nil {
        logger.Fatalf("Error parsing kubernetes version constraint: %v", err)
    }

    k8sSemver, err := semver.NewVersion(ver.GitVersion)
    if err != nil {
        logger.Fatalf("Error parsing kubernetes version as a semantic version: %v", err)
    }

    if !semverConstraint.Check(k8sSemver) {
        logger.Fatalf("Unsupported version of kubernetes detected. Expected %s, got %v", k8sVersionConstraint, ver)
    }

    logger.Infof("Connected to Kubernetes API %s", ver)
}
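
The -alpha.1 trick explained in the comment inside verifyKubernetesVersion can be demonstrated in isolation: a Masterminds/semver constraint without a prerelease tag rejects any version that carries one, such as the EKS-style version mentioned there. A standalone sketch:

package main

import (
    "fmt"

    "github.com/Masterminds/semver/v3"
)

func main() {
    eksVersion := semver.MustParse("v1.12.6-eks-d69f1b")

    // Without a prerelease tag, the constraint excludes prerelease versions.
    plain, _ := semver.NewConstraint("^1.11.0")
    fmt.Println(plain.Check(eksVersion)) // false

    // Appending -alpha.1 (any prerelease value works) admits them.
    withPre, _ := semver.NewConstraint("^1.11.0-alpha.1")
    fmt.Println(withPre.Check(eksVersion)) // true
}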