Mirror of https://github.com/fluxcd/flagger.git (synced 2026-04-15 06:57:34 +00:00)

Compare commits: 259 commits
Commits (SHA1):
210e21176b, 0a0c3835d6, 531893b279, e6bb47f920, 307813a628, 38fc6b567f, 17015b23bf,
c9e53dd069, e26a10b481, 281d869f54, 91126d102d, ba4646cddb, 438877674a, da451a0cf4,
5e1d00d4d2, 00d54d268c, 174e9fdc93, f7fd6cce8c, 5dc336d609, ae6a683f23, 5acf189fbe,
090329d0c9, 96fd359b99, 519f343fcc, 5d2a7ba9e7, 1664ca436e, 84ae65c763, 6085753d84,
da706be4aa, 65e3bcb1d8, 582f6eec77, 4200c0159d, cf8fe94fca, 30d553c6f3, f8f6a994dd,
085639bbde, 3bfa7c974d, d29e475277, b7ba3ab063, 9796903c78, 2f25fab560, 215c859619,
7071d42152, 08b1e52278, 801f801e02, af5634962f, fe7615afb4, fc6bedda23, a7f997c092,
121eb767cb, cd3a1d8478, 6f6af25467, a0f1638f6c, fc13276f0e, 8a0b92db19, 2f0d34adb2,
617f416291, 7a438ad323, 5776f0b64b, 96d190a789, d2038699c0, cb3b5cba90, 8c881ab758,
caefaf73aa, e8d7001f5e, ae0f20a445, 4ddc12185f, e81627a96d, 47be2a25f2, 6832a4ffde,
bd58a47862, 613fb92a25, 250d9f2836, 0cab25e44c, cbf6b462e4, 8695660c58, 1216990f52,
204228bc8f, ebc26e9ea0, 3c03119d2d, 644049092f, 578f447728, 3bf926e419, 48ee4f8bd2,
b4964a0535, 47ff00e9b9, 6ca99a5ddb, 30b5054692, edf7b90c11, 7f0f97d14d, b03b75cd7e,
f0d2e60a9a, 328f1d9ea2, a14013f393, aef1d7904d, dc478188c1, 9fa6e775c0, 584350623b,
919959b32c, ec54eedf93, f311797215, 059b5d0f89, 7542640494, 52493f181a, 5d95143536,
a2c5861ca5, fcc07f02b0, 3f43526aac, cd07da9137, 30ab182b2e, 2ddd9587f7, 50800857b6,
8f50521435, 45ecaa9084, 9c7db58d87, 6b11e9714b, 7f5a9ed34a, bc9a231d26, 0bb3815f73,
944cc8ef62, e97334d7c1, 2dacf08c30, 6f6590774e, fe5bb3fd26, c638edd346, c02477a245,
da6da9c839, d83293776d, d5994ac127, 36584826bb, 7a6fccb70d, ca1971c085, 97eaecec48,
01d47808a7, 7d2f3dea7a, bce1d02b3b, 9a993b131d, 636a1d7576, fb621ec465, 00e993c686,
374a55d8f5, 1e88e2fa72, a2326198f6, a0031d626a, a2b58d59ab, e8b17406b7, 5245045d84,
b57d39369b, db72fe3d97, 3a2f688c56, 13a2a5073f, 418853fd0c, cfb68a6e56, 88b13274d7,
056ba675a7, 873b74561c, 8b42ce374d, 4871003ff1, b4e7ad5575, 1a246060e2, 6a3d74c645,
2073bd2027, c63554c534, be8ed8a696, 98530d9968, 38adc513a6, eb12e3bde1, 8b2839d36e,
f0fa2aa6bb, 33528b073f, cf8783ea37, 00355635f8, aa485f4bf1, 273b05fb24, e470474d6f,
ddfd2fe2ec, 7533d0ae99, 04ec7f0388, 419000cc13, 0dc8edb437, 0759b6531b, d8f984de7d,
82e490a875, c6dffd9d3e, 8ee3d5835a, 1209d7e42b, cdc05ba506, a6fae0195f, 11375b6890,
3811470ebf, e2b08eb4dc, 38d3ca1022, df459c5fe6, d1d9c0e2a9, c1b1d7d448, e6b5ee2042,
0170fc6166, 4cc2ada2a2, a5d3e4f6a6, 7c92b33886, 0f0b9414ae, 6fbb67ee8c, 6634f1a9ae,
8da8138f77, 588f4c477b, fda1775d3a, fc71d53c71, ab2a320659, 7f50f81ac7, c36a13ccff,
47de726345, 7a4fdbddc0, 0dc6f33550, b2436eb0df, cc673159d7, 17c310d66d, e7357c4e07,
c44de2d7c3, d82b2c219a, 35c8957a55, 8555f8250a, 8137a25b13, 2db5573c0e, 1e382203b8,
873903a4cb, e5b8afc085, ded658fed9, 88d8858900, 737c185aa6, 0006a68740, 4db91f7062,
b8c23967b7, 2019d048a4, fe0a4eb20c, a35b0e8639, 4c0843f92a, 867c1af897, 100308289f,
3d4739760d, 9f321dd685, ba6078f235, cd2f1a24bd, b87a81b798, 0f9dd61786, 4869a9f3ae,
cd6f36302d, e5fdc7a57d, 834a601311, a2784c533e, 8e3ee3439c, f9d40cfe1b, b26b49fac2,
f68d647fd0, deb3fb01a2, 3accd23a19, 6a66113560, 6a7f7415fa, 4654f2cba9, 17557dc206
.cosign/README.md (new file, 50 lines)
@@ -0,0 +1,50 @@
# Flagger signed releases

Flagger releases published to GitHub Container Registry as multi-arch container images
are signed using [cosign](https://github.com/sigstore/cosign).

## Verify Flagger images

Install the [cosign](https://github.com/sigstore/cosign) CLI:

```sh
brew install sigstore/tap/cosign
```

Verify a Flagger release with cosign CLI:

```sh
cosign verify -key https://raw.githubusercontent.com/fluxcd/flagger/main/cosign/cosign.pub \
ghcr.io/fluxcd/flagger:1.13.0
```

Verify Flagger images before they get pulled on your Kubernetes clusters with [Kyverno](https://github.com/kyverno/kyverno/):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: verify-flagger-image
  annotations:
    policies.kyverno.io/title: Verify Flagger Image
    policies.kyverno.io/category: Cosign
    policies.kyverno.io/severity: medium
    policies.kyverno.io/subject: Pod
    policies.kyverno.io/minversion: 1.4.2
spec:
  validationFailureAction: enforce
  background: false
  rules:
    - name: verify-image
      match:
        resources:
          kinds:
            - Pod
      verifyImages:
        - image: "ghcr.io/fluxcd/flagger:*"
          key: |-
            -----BEGIN PUBLIC KEY-----
            MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEST+BqQ1XZhhVYx0YWQjdUJYIG5Lt
            iz2+UxRIqmKBqNmce2T+l45qyqOs99qfD7gLNGmkVZ4vtJ9bM7FxChFczg==
            -----END PUBLIC KEY-----
```
.cosign/cosign.key (new file, 11 lines)
@@ -0,0 +1,11 @@
-----BEGIN ENCRYPTED COSIGN PRIVATE KEY-----
eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6
OCwicCI6MX0sInNhbHQiOiIvK1MwbTNrU3pGMFFXdVVYQkFoY2goL0w3NzVSQUsvTg==
-----END ENCRYPTED COSIGN PRIVATE KEY-----
.cosign/cosign.pub (new file, 4 lines)
@@ -0,0 +1,4 @@
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEST+BqQ1XZhhVYx0YWQjdUJYIG5Lt
iz2+UxRIqmKBqNmce2T+l45qyqOs99qfD7gLNGmkVZ4vtJ9bM7FxChFczg==
-----END PUBLIC KEY-----
(docs redirects configuration)
@@ -13,3 +13,6 @@ redirects:
  usage/skipper-progressive-delivery: tutorials/skipper-progressive-delivery.md
  usage/crossover-progressive-delivery: tutorials/crossover-progressive-delivery.md
  usage/traefik-progressive-delivery: tutorials/traefik-progressive-delivery.md
  usage/osm-progressive-delivery: tutorials/osm-progressive-delivery.md
  usage/kuma-progressive-delivery: tutorials/kuma-progressive-delivery.md
  usage/gatewayapi-progressive-delivery: tutorials/gatewayapi-progressive-delivery.md
.github/workflows/build.yaml (5 changed lines)
@@ -9,6 +9,9 @@ on:
  branches:
    - main

permissions:
  contents: read # for actions/checkout to fetch code

jobs:
  container:
    runs-on: ubuntu-latest
@@ -25,7 +28,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.15.x
+          go-version: 1.17.x
      - name: Download modules
        run: |
          go mod download
.github/workflows/e2e.yaml (12 changed lines)
@@ -9,25 +9,37 @@ on:
  branches:
    - main

permissions:
  contents: read # for actions/checkout to fetch code

jobs:
  kind:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        provider:
          # service mesh
          - istio
          - linkerd
          - osm
          - kuma
          # ingress controllers
          - contour
          - nginx
          - traefik
          - gloo
          - skipper
          - kubernetes
          - gatewayapi
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup Kubernetes
        uses: engineerd/setup-kind@v0.5.0
        with:
          version: "v0.11.1"
          image: kindest/node:v1.21.1@sha256:fae9a58f17f18f06aeac9772ca8b5ac680ebbed985e266f711d936e91d113bad
      - name: Build container image
        run: |
          docker build -t test/flagger:latest .
.github/workflows/helm.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
name: helm

on:
  workflow_dispatch:

permissions:
  contents: write # needed to push chart

jobs:
  build-push:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@v1.3.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_url: https://flagger.app
.github/workflows/push-ld.yml (new file, 56 lines)
@@ -0,0 +1,56 @@
name: push-ld
on:
  workflow_dispatch:

env:
  IMAGE: "ghcr.io/fluxcd/flagger-loadtester"

permissions:
  contents: write # needed to write releases
  packages: write # needed for ghcr access

jobs:
  build-push:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Prepare
        id: prep
        run: |
          VERSION=$(grep 'VERSION' cmd/loadtester/main.go | head -1 | awk '{ print $4 }' | tr -d '"')
          echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          echo ::set-output name=VERSION::${VERSION}
      - name: Setup QEMU
        uses: docker/setup-qemu-action@v1
      - name: Setup Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: fluxcdbot
          password: ${{ secrets.GHCR_TOKEN }}
      - name: Generate image meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: |
            ${{ env.IMAGE }}
          tags: |
            type=raw,value=${{ steps.prep.outputs.VERSION }}
      - name: Publish image
        uses: docker/build-push-action@v2
        with:
          push: true
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile.loadtester
          platforms: linux/amd64,linux/arm64
          build-args: |
            REVISION=${{ github.sha }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      - name: Check images
        run: |
          docker buildx imagetools inspect ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
.github/workflows/release.yml (61 changed lines)
@@ -4,34 +4,47 @@ on:
  tags:
    - 'v*'

permissions:
  contents: write # needed to write releases
  id-token: write # needed for keyless signing
  packages: write # needed for ghcr access

env:
  IMAGE: "ghcr.io/fluxcd/${{ github.event.repository.name }}"

jobs:
  build-push:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: sigstore/cosign-installer@main
      - name: Prepare
        id: prep
        run: |
          VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
          CHANGELOG="https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
          echo "[CHANGELOG](${CHANGELOG})" > notes.md
          echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          echo ::set-output name=VERSION::${VERSION}
          echo ::set-output name=CHANGELOG::${CHANGELOG}
      - name: Setup QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all
      - name: Setup Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          buildkitd-flags: "--debug"
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: fluxcdbot
          password: ${{ secrets.GHCR_TOKEN }}
      - name: Generate image meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: |
            ${{ env.IMAGE }}
          tags: |
            type=raw,value=${{ steps.prep.outputs.VERSION }}
      - name: Publish image
        uses: docker/build-push-action@v2
        with:
@@ -42,33 +55,31 @@ jobs:
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          build-args: |
            REVISON=${{ github.sha }}
-          tags: |
-            ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
-          labels: |
-            org.opencontainers.image.title=${{ github.event.repository.name }}
-            org.opencontainers.image.description=${{ github.event.repository.description }}
-            org.opencontainers.image.url=${{ github.event.repository.html_url }}
-            org.opencontainers.image.source=${{ github.event.repository.html_url }}
-            org.opencontainers.image.revision=${{ github.sha }}
-            org.opencontainers.image.version=${{ steps.prep.outputs.VERSION }}
-            org.opencontainers.image.created=${{ steps.prep.outputs.BUILD_DATE }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
      - name: Sign image
        run: |
          echo -n "${{secrets.COSIGN_PASSWORD}}" | \
          cosign sign -key ./.cosign/cosign.key -a git_sha=$GITHUB_SHA \
          ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
      - name: Check images
        run: |
-          docker buildx imagetools inspect ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
+          docker buildx imagetools inspect ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
      - name: Verify image signature
        run: |
          cosign verify -key ./.cosign/cosign.pub \
          ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@v1.3.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_url: https://flagger.app
          linting: off
-      - name: Create release
-        uses: actions/create-release@latest
+      - uses: anchore/sbom-action/download-syft@v0
+      - name: Create release and SBOM
+        uses: goreleaser/goreleaser-action@v2
        with:
+          version: latest
+          args: release --release-notes=notes.md --rm-dist --skip-validate
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: ${{ github.ref }}
-          draft: false
-          prerelease: false
-          body: |
-            [CHANGELOG](${{ steps.prep.outputs.CHANGELOG }})
.github/workflows/scan.yml (4 changed lines)
@@ -8,6 +8,10 @@ on:
  schedule:
    - cron: '18 10 * * 3'

permissions:
  contents: read # for actions/checkout to fetch code
  security-events: write # for codeQL to write security events

jobs:
  fossa:
    name: FOSSA
(goreleaser configuration)
@@ -1,14 +1,17 @@
project_name: flagger

builds:
  - main: ./cmd/flagger
    binary: flagger
    ldflags: -s -w -X github.com/fluxcd/flagger/pkg/version.REVISION={{.Commit}}
    goos:
      - linux
    goarch:
      - amd64
    env:
      - CGO_ENABLED=0
archives:
-  - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
-    files:
-      - none*
+  - skip: true

release:
  prerelease: auto

source:
  enabled: true
  name_template: "{{ .ProjectName }}_{{ .Version }}_source_code"

sboms:
  - id: source
    artifacts: source
    documents:
      - "{{ .ProjectName }}_{{ .Version }}_sbom.spdx.json"
CHANGELOG.md (301 changed lines)
@@ -2,6 +2,307 @@

All notable changes to this project are documented in this file.

## 1.19.0

**Release date:** 2022-03-14

This release comes with support for Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io/) v1alpha2.
For more details see the [Gateway API Progressive Delivery tutorial](https://docs.flagger.app/tutorials/gatewayapi-progressive-delivery).

#### Features

- Add Gateway API as a provider
  [#1108](https://github.com/fluxcd/flagger/pull/1108)

#### Improvements

- Add arm64 support for loadtester
  [#1128](https://github.com/fluxcd/flagger/pull/1128)
- Restrict source namespaces in flagger-loadtester
  [#1119](https://github.com/fluxcd/flagger/pull/1119)
- Remove support for Helm v2 in loadtester
  [#1130](https://github.com/fluxcd/flagger/pull/1130)

#### Fixes

- Fix potential canary finalizer duplication
  [#1125](https://github.com/fluxcd/flagger/pull/1125)
- Use the primary replicas when scaling up the canary (no hpa)
  [#1110](https://github.com/fluxcd/flagger/pull/1110)
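To make the new provider concrete, here is a minimal sketch of a Canary that routes traffic through Gateway API. The Gateway name and namespace are assumptions for illustration; `service.gatewayRefs` matches the CRD schema added further down in this diff:

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: gatewayapi
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
    targetPort: 9898
    # parent Gateways for the generated HTTPRoute (see the gatewayRefs CRD schema below)
    gatewayRefs:
      - name: gateway            # hypothetical Gateway name
        namespace: istio-system  # hypothetical namespace
  analysis:
    interval: 15s
    threshold: 10
    maxWeight: 50
    stepWeight: 10
```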
## 1.18.0

**Release date:** 2022-02-14

This release comes with a new API field called `canaryReadyThreshold`
that allows setting the percentage of pods that need to be available
to consider the canary deployment as ready.

Starting with this version, the canary deployment labels, annotations and
replicas fields are copied to the primary deployment at promotion time.

#### Features

- Add field `spec.analysis.canaryReadyThreshold` for configuring canary threshold
  [#1102](https://github.com/fluxcd/flagger/pull/1102)

#### Improvements

- Update metadata during subsequent promote
  [#1092](https://github.com/fluxcd/flagger/pull/1092)
- Set primary deployment `replicas` when autoscaler isn't used
  [#1106](https://github.com/fluxcd/flagger/pull/1106)
- Update `matchLabels` for `TopologySpreadConstraints` in Deployments
  [#1041](https://github.com/fluxcd/flagger/pull/1041)

#### Fixes

- Send warning and error alerts correctly
  [#1105](https://github.com/fluxcd/flagger/pull/1105)
- Fix for when Prometheus returns NaN
  [#1095](https://github.com/fluxcd/flagger/pull/1095)
- docs: Fix ExternalDNS typo
  [#1103](https://github.com/fluxcd/flagger/pull/1103)
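As a hedged illustration of where the new `canaryReadyThreshold` field sits (the values here are made up, not defaults), the analysis section of a Canary would gain:

```yaml
  analysis:
    interval: 1m
    threshold: 10
    # consider the canary rollout ready once half of its pods are available (assumed value)
    canaryReadyThreshold: 50
    # counterpart for the primary workload, introduced in 1.16.0
    primaryReadyThreshold: 100
```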
## 1.17.0

**Release date:** 2022-01-11

This release comes with support for [Kuma Service Mesh](https://kuma.io/).
For more details see the [Kuma Progressive Delivery tutorial](https://docs.flagger.app/tutorials/kuma-progressive-delivery).

To differentiate alerts based on the cluster name, you can configure Flagger with the `-cluster-name=my-cluster`
command flag, or with Helm `--set clusterName=my-cluster`.

#### Features

- Add kuma support for progressive traffic shifting canaries
  [#1085](https://github.com/fluxcd/flagger/pull/1085)
  [#1093](https://github.com/fluxcd/flagger/pull/1093)

#### Improvements

- Publish a Software Bill of Materials (SBOM)
  [#1094](https://github.com/fluxcd/flagger/pull/1094)
- Add cluster name to flagger cmd args for alerting
  [#1041](https://github.com/fluxcd/flagger/pull/1041)
## 1.16.1

**Release date:** 2021-12-17

This release contains updates to Kubernetes packages (1.23.0), Alpine (3.15)
and load tester components.

#### Improvements

- Release loadtester v0.21.0
  [#1083](https://github.com/fluxcd/flagger/pull/1083)
- Add loadtester image pull secrets to Helm chart
  [#1076](https://github.com/fluxcd/flagger/pull/1076)
- Update libraries included in the load tester to newer versions
  [#1063](https://github.com/fluxcd/flagger/pull/1063)
  [#1080](https://github.com/fluxcd/flagger/pull/1080)
- Update Kubernetes packages to v1.23.0
  [#1078](https://github.com/fluxcd/flagger/pull/1078)
- Update Alpine to 3.15
  [#1081](https://github.com/fluxcd/flagger/pull/1081)
- Update Go to v1.17
  [#1077](https://github.com/fluxcd/flagger/pull/1077)
## 1.16.0

**Release date:** 2021-11-22

This release comes with a new API field called `primaryReadyThreshold`
that allows setting the percentage of pods that need to be available
to consider the primary deployment as ready.

#### Features

- Allow configuring threshold for primary
  [#1048](https://github.com/fluxcd/flagger/pull/1048)

#### Improvements

- Append to list of ownerReferences for primary configmaps and secrets
  [#1052](https://github.com/fluxcd/flagger/pull/1052)
- Prevent Flux from overriding Flagger managed objects
  [#1049](https://github.com/fluxcd/flagger/pull/1049)
- Add warning in docs about ExternalDNS + Istio configuration
  [#1044](https://github.com/fluxcd/flagger/pull/1044)

#### Fixes

- Mark `CanaryMetric.Threshold` as omitempty
  [#1047](https://github.com/fluxcd/flagger/pull/1047)
- Replace `ioutil` in testing of gchat
  [#1045](https://github.com/fluxcd/flagger/pull/1045)
## 1.15.0

**Release date:** 2021-10-28

This release comes with support for NGINX ingress canary metrics.
The nginx-ingress minimum supported version is now v1.0.2.

Starting with this version, Flagger will use the `spec.service.apex.annotations`
to annotate the generated apex VirtualService, TrafficSplit or HTTPProxy.

#### Features

- Use nginx controller canary metrics
  [#1023](https://github.com/fluxcd/flagger/pull/1023)
- Add metadata annotations to generated apex objects
  [#1034](https://github.com/fluxcd/flagger/pull/1034)

#### Improvements

- Update load tester binaries (CVEs fix)
  [#1038](https://github.com/fluxcd/flagger/pull/1038)
- Add podLabels to load tester Helm chart
  [#1036](https://github.com/fluxcd/flagger/pull/1036)
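A short sketch of the new `spec.service.apex.annotations` field, reusing the Kuma protocol annotation that appears in the example manifests later in this diff (the port and annotation value are illustrative):

```yaml
  service:
    port: 9898
    apex:
      annotations:
        9898.service.kuma.io/protocol: "http"
```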
## 1.14.0

**Release date:** 2021-09-20

This release comes with support for extending the canary analysis with
Dynatrace, InfluxDB and Google Cloud Monitoring (Stackdriver) metrics.

#### Features

- Add Stackdriver metric provider
  [#991](https://github.com/fluxcd/flagger/pull/991)
- Add Influxdb metric provider
  [#1012](https://github.com/fluxcd/flagger/pull/1012)
- Add Dynatrace metric provider
  [#1013](https://github.com/fluxcd/flagger/pull/1013)

#### Fixes

- Fix inline promql query
  [#1015](https://github.com/fluxcd/flagger/pull/1015)
- Fix Istio load balancer settings mapping
  [#1016](https://github.com/fluxcd/flagger/pull/1016)
## 1.13.0

**Release date:** 2021-08-25

This release comes with support for [Open Service Mesh](https://openservicemesh.io).
For more details see the [OSM Progressive Delivery tutorial](https://docs.flagger.app/tutorials/osm-progressive-delivery).

Starting with this version, Flagger container images are signed with
[sigstore/cosign](https://github.com/sigstore/cosign), for more details see the
[Flagger cosign docs](https://github.com/fluxcd/flagger/blob/main/.cosign/README.md).

#### Features

- Support OSM progressive traffic shifting in Flagger
  [#955](https://github.com/fluxcd/flagger/pull/955)
  [#977](https://github.com/fluxcd/flagger/pull/977)
- Add support for Google Chat alerts
  [#953](https://github.com/fluxcd/flagger/pull/953)

#### Improvements

- Sign Flagger container images with cosign
  [#983](https://github.com/fluxcd/flagger/pull/983)
- Update Gloo APIs and e2e tests to Gloo v1.8.9
  [#982](https://github.com/fluxcd/flagger/pull/982)
- Update e2e tests to Istio v1.11, Contour v1.18, Linkerd v2.10.2 and NGINX v0.49.0
  [#979](https://github.com/fluxcd/flagger/pull/979)
- Update e2e tests to Traefik 2.4.9
  [#960](https://github.com/fluxcd/flagger/pull/960)
- Add support for volumes/volumeMounts in loadtester Helm chart
  [#975](https://github.com/fluxcd/flagger/pull/975)
- Add extra podLabels options to Flagger Helm Chart
  [#966](https://github.com/fluxcd/flagger/pull/966)

#### Fixes

- Fix for the http client proxy overriding the default client
  [#943](https://github.com/fluxcd/flagger/pull/943)
- Drop deprecated io/ioutil
  [#964](https://github.com/fluxcd/flagger/pull/964)
- Remove problematic nulls from Grafana dashboard
  [#952](https://github.com/fluxcd/flagger/pull/952)
## 1.12.1

**Release date:** 2021-06-17

This release comes with a fix to Flagger when used with Flux v2.

#### Improvements

- Update Go to v1.16 and Kubernetes packages to v1.21.1
  [#940](https://github.com/fluxcd/flagger/pull/940)

#### Fixes

- Remove the GitOps Toolkit metadata from generated objects
  [#939](https://github.com/fluxcd/flagger/pull/939)
## 1.12.0

**Release date:** 2021-06-16

This release comes with support for disabling the SSL certificate verification
for the Prometheus and Graphite metric providers.

#### Improvements

- Add `insecureSkipVerify` option for Prometheus and Graphite
  [#935](https://github.com/fluxcd/flagger/pull/935)
- Copy labels from Gloo upstreams
  [#932](https://github.com/fluxcd/flagger/pull/932)
- Improve language and correct typos in FAQs docs
  [#925](https://github.com/fluxcd/flagger/pull/925)
- Remove Flux GC markers from generated objects
  [#936](https://github.com/fluxcd/flagger/pull/936)

#### Fixes

- Require SMI TrafficSplit Service and Weight
  [#878](https://github.com/fluxcd/flagger/pull/878)
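For illustration, a MetricTemplate sketch that exercises the new option; the Prometheus address is hypothetical and the query is a placeholder, but `provider.insecureSkipVerify` matches the CRD field added in this release:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: error-rate
  namespace: test
spec:
  provider:
    type: prometheus
    # hypothetical in-cluster Prometheus served over a self-signed certificate
    address: https://prometheus.monitoring:9090
    # skip SSL certificate validation for the address above
    insecureSkipVerify: true
  query: vector(0) # placeholder query for illustration
```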
## 1.11.0

**Release date:** 2021-06-01

**Breaking change:** the minimum supported version of Kubernetes is v1.19.0.

This release comes with support for Kubernetes Ingress `networking.k8s.io/v1`.
The Ingress from `networking.k8s.io/v1beta1` is no longer supported,
affected integrations: **NGINX** and **Skipper** ingress controllers.

#### Improvements

- Upgrade Ingress to networking.k8s.io/v1
  [#917](https://github.com/fluxcd/flagger/pull/917)
- Update Kubernetes manifests to rbac.authorization.k8s.io/v1
  [#920](https://github.com/fluxcd/flagger/pull/920)
## 1.10.0

**Release date:** 2021-05-28

This release comes with support for [Graphite](https://docs.flagger.app/usage/metrics#graphite) metric templates.

#### Features

- Add Graphite metrics provider
  [#915](https://github.com/fluxcd/flagger/pull/915)

#### Improvements

- ConfigTracker: Scan envFrom in init-containers
  [#914](https://github.com/fluxcd/flagger/pull/914)
- e2e: Update Istio to v1.10 and Contour to v1.15
  [#914](https://github.com/fluxcd/flagger/pull/914)
## 1.9.0

**Release date:** 2021-05-14
(Flagger Dockerfile)
@@ -1,4 +1,4 @@
-FROM golang:1.15-alpine as builder
+FROM golang:1.17-alpine as builder

ARG TARGETPLATFORM
ARG REVISON
@@ -21,7 +21,7 @@ RUN CGO_ENABLED=0 go build \
  -ldflags "-s -w -X github.com/fluxcd/flagger/pkg/version.REVISION=${REVISON}" \
  -a -o flagger ./cmd/flagger

-FROM alpine:3.13
+FROM alpine:3.15

RUN apk --no-cache add ca-certificates
(load tester Dockerfile)
@@ -1,59 +1,59 @@
-FROM alpine:3.11 as build
+FROM golang:1.17-alpine as builder

-RUN apk --no-cache add alpine-sdk perl curl
+ARG TARGETPLATFORM
+ARG TARGETARCH
+ARG REVISION

-RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
-    chmod +x hey && mv hey /usr/local/bin/hey
+RUN apk --no-cache add alpine-sdk perl curl bash tar

-RUN HELM2_VERSION=2.16.8 && \
-    curl -sSL "https://get.helm.sh/helm-v${HELM2_VERSION}-linux-amd64.tar.gz" | tar xvz && \
-    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
-    chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller
+RUN HELM3_VERSION=3.7.2 && \
+    curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-${TARGETARCH}.tar.gz" | tar xvz && \
+    chmod +x linux-${TARGETARCH}/helm && mv linux-${TARGETARCH}/helm /usr/local/bin/helm

-RUN HELM3_VERSION=3.2.3 && \
-    curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-amd64.tar.gz" | tar xvz && \
-    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3

-RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
-    wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+RUN GRPC_HEALTH_PROBE_VERSION=v0.4.6 && \
+    wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-${TARGETARCH} && \
     chmod +x /usr/local/bin/grpc_health_probe

-RUN GHZ_VERSION=0.39.0 && \
-    curl -sSL "https://github.com/bojand/ghz/releases/download/v${GHZ_VERSION}/ghz_${GHZ_VERSION}_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
-    mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz
+RUN GHZ_VERSION=0.105.0 && \
+    curl -sSL "https://github.com/bojand/ghz/archive/refs/tags/v${GHZ_VERSION}.tar.gz" | tar xz -C /tmp && \
+    cd /tmp/ghz-${GHZ_VERSION}/cmd/ghz && GOARCH=$TARGETARCH go build . && mv ghz /usr/local/bin && \
+    chmod +x /usr/local/bin/ghz

-RUN HELM_TILLER_VERSION=0.9.3 && \
-    curl -sSL "https://github.com/rimusz/helm-tiller/archive/v${HELM_TILLER_VERSION}.tar.gz" | tar xz -C /tmp && \
-    mv /tmp/helm-tiller-${HELM_TILLER_VERSION} /tmp/helm-tiller
+WORKDIR /workspace

-RUN WRK_VERSION=4.0.2 && \
-    cd /tmp && git clone -b ${WRK_VERSION} https://github.com/wg/wrk
-RUN cd /tmp/wrk && make
+# copy modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum

+# cache modules
+RUN go mod download

+# copy source code
+COPY cmd/ cmd/
+COPY pkg/ pkg/

+# build
+RUN CGO_ENABLED=0 go build -o loadtester ./cmd/loadtester/*

FROM bash:5.0

ARG TARGETPLATFORM

RUN addgroup -S app && \
    adduser -S -g app app && \
-    apk --no-cache add ca-certificates curl jq libgcc
+    apk --no-cache add ca-certificates curl jq libgcc wrk hey

WORKDIR /home/app

COPY --from=bats/bats:v1.1.0 /opt/bats/ /opt/bats/
RUN ln -s /opt/bats/bin/bats /usr/local/bin/

-COPY --from=build /usr/local/bin/hey /usr/local/bin/
-COPY --from=build /tmp/wrk/wrk /usr/local/bin/
-COPY --from=build /usr/local/bin/helm /usr/local/bin/
-COPY --from=build /usr/local/bin/tiller /usr/local/bin/
-COPY --from=build /usr/local/bin/ghz /usr/local/bin/
-COPY --from=build /usr/local/bin/helmv3 /usr/local/bin/
-COPY --from=build /usr/local/bin/grpc_health_probe /usr/local/bin/
-COPY --from=build /tmp/helm-tiller /tmp/helm-tiller
+COPY --from=builder /usr/local/bin/helm /usr/local/bin/
+COPY --from=builder /usr/local/bin/ghz /usr/local/bin/
+COPY --from=builder /usr/local/bin/grpc_health_probe /usr/local/bin/

ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto

-COPY ./bin/loadtester .

RUN chown -R app:app ./
RUN chown -R app:app /tmp/ghz

@@ -63,7 +63,6 @@ USER app
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
RUN wrk -d 1s -c 1 -t 1 https://flagger.app > /dev/null && echo $? | grep 0

-# install Helm v2 plugins
-RUN helm init --client-only && helm plugin install /tmp/helm-tiller
+COPY --from=builder --chown=app:app /workspace/loadtester .

ENTRYPOINT ["./loadtester"]
(MAINTAINERS)
@@ -2,5 +2,7 @@ The maintainers are generally available in Slack at
https://cloud-native.slack.com/messages/flagger/ (obtain an invitation
at https://slack.cncf.io/).

-Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)
-Takeshi Yoneda, DMM.com <cz.rk.t0415y.g@gmail.com> (Slack: @mathetake Twitter: @mathetake)
+In alphabetical order:
+
+Stefan Prodan, Weaveworks <stefan@weave.works> (github: @stefanprodan, slack: stefanprodan)
+Takeshi Yoneda, Tetrate <takeshi@tetrate.io> (github: @mathetake, slack: mathetake)
Makefile (14 changed lines)
@@ -6,6 +6,7 @@ build:
	CGO_ENABLED=0 go build -a -o ./bin/flagger ./cmd/flagger

fmt:
+	go mod tidy
	gofmt -l -s -w ./
	goimports -l -w ./

@@ -29,12 +30,12 @@ crd:
version-set:
	@next="$(TAG)" && \
	current="$(VERSION)" && \
-	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
-	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
-	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
-	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
-	sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
-	sed -i '' "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
+	sed -i "s/$$current/$$next/g" pkg/version/version.go && \
+	sed -i "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
+	sed -i "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
+	sed -i "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
+	sed -i "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
+	sed -i "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
	echo "Version $$next set in code, deployment, chart and kustomize"

@@ -42,7 +43,6 @@ release:
	git push origin "v$(VERSION)"

loadtester-build:
-	CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./bin/loadtester ./cmd/loadtester/*
	docker build -t ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester

loadtester-push:
README.md (79 changed lines)
@@ -1,4 +1,4 @@
# flagger

[](https://bestpractices.coreinfrastructure.org/projects/4783)
[](https://github.com/fluxcd/flagger/actions)
@@ -13,10 +13,10 @@ by gradually shifting traffic to the new version while measuring metrics and run
(flagger overview diagram)

Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
-using a service mesh (App Mesh, Istio, Linkerd)
+using a service mesh (App Mesh, Istio, Linkerd, Open Service Mesh, Kuma)
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
-For release analysis, Flagger can query Prometheus, Datadog, New Relic or CloudWatch
-and for alerting it uses Slack, MS Teams, Discord and Rocket.
+For release analysis, Flagger can query Prometheus, Datadog, New Relic, CloudWatch, Dynatrace,
+InfluxDB and Stackdriver and for alerting it uses Slack, MS Teams, Discord, Rocket and Google Chat.

Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
and part of the [Flux](https://fluxcd.io) family of GitOps tools.
@@ -38,6 +38,8 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [App Mesh](https://docs.flagger.app/tutorials/appmesh-progressive-delivery)
* [Istio](https://docs.flagger.app/tutorials/istio-progressive-delivery)
* [Linkerd](https://docs.flagger.app/tutorials/linkerd-progressive-delivery)
+* [Open Service Mesh (OSM)](https://docs.flagger.app/tutorials/osm-progressive-delivery)
+* [Kuma Service Mesh](https://docs.flagger.app/tutorials/kuma-progressive-delivery)
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
* [NGINX Ingress](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
@@ -70,7 +72,7 @@ metadata:
  namespace: test
spec:
  # service mesh provider (optional)
-  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, contour, gloo, supergloo, traefik
+  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, contour, gloo, supergloo, traefik, osm
  # for SMI TrafficSplit can be: smi:v1alpha1, smi:v1alpha2, smi:v1alpha3
  provider: istio
  # deployment reference
@@ -182,34 +184,46 @@ For more details on how the canary analysis and promotion works please [read the

**Service Mesh**

-| Feature | App Mesh | Istio | Linkerd | SMI | Kubernetes CNI |
-|---|---|---|---|---|---|
-| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
-| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
-| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
-| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
-| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
-| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-
-For SMI compatible service mesh solutions like Open Service Mesh, Consul Connect or Nginx Service Mesh,
-[Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus) can be used to implement
-the request success rate and request duration checks.
+| Feature | App Mesh | Istio | Linkerd | Kuma | OSM | Kubernetes CNI |
+|---|---|---|---|---|---|---|
+| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
+| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
+| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

**Ingress**

| Feature | Contour | Gloo | NGINX | Skipper | Traefik |
|---|---|---|---|---|---|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

**Networking Interface**

| Feature | Gateway API | SMI |
|---|---|---|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_minus_sign: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_minus_sign: | :heavy_minus_sign: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: |

For all [Gateway API](https://gateway-api.sigs.k8s.io/) implementations like [Contour](https://projectcontour.io/guides/gateway-api/) or [Istio](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/), and [SMI](https://smi-spec.io) compatible service mesh solutions like [Consul Connect](https://www.consul.io/docs/connect) or [Nginx Service Mesh](https://docs.nginx.com/nginx-service-mesh/), [Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus) can be used to implement the request success rate and request duration checks.
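A hedged sketch of such a template; the Prometheus address and the metric name are assumptions that depend on the mesh or gateway in use, while `{{ namespace }}`, `{{ target }}` and `{{ interval }}` are the variables Flagger substitutes at analysis time:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: success-rate
  namespace: test
spec:
  provider:
    type: prometheus
    address: http://prometheus.monitoring:9090 # hypothetical address
  query: |
    # success rate as a percentage: non-5xx requests over all requests
    100 * sum(rate(http_requests_total{namespace="{{ namespace }}",
      pod=~"{{ target }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",code!~"5.."}[{{ interval }}]))
    /
    sum(rate(http_requests_total{namespace="{{ namespace }}",
      pod=~"{{ target }}-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"}[{{ interval }}]))
```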
### Roadmap

@@ -222,7 +236,6 @@ the request success rate and request duration checks.

#### Integrations

* Add support for Kubernetes [Ingress v2](https://github.com/kubernetes-sigs/service-apis)
* Add support for ingress controllers like HAProxy and ALB
* Add support for metrics providers like InfluxDB, Stackdriver, SignalFX

@@ -246,8 +259,8 @@ If you have any questions about Flagger and progressive delivery:

* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [CNCF community slack](https://slack.cncf.io/)
  and join the [#flagger](https://cloud-native.slack.com/messages/flagger/) channel.
-* Check out the [Flux talks section](https://fluxcd.io/community/#talks) to see a list of online talks,
-  hands-on training and meetups.
+* Check out the **[Flux events calendar](https://fluxcd.io/#calendar)** with upcoming talks, events and meetings you can attend.
+* Or view the **[Flux resources section](https://fluxcd.io/resources)** with past events videos you can watch.
* File an [issue](https://github.com/fluxcd/flagger/issues/new).

Your feedback is always welcome!
artifacts/examples/kuma-canary.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
  annotations:
    kuma.io/mesh: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    targetPort: 9898
    apex:
      annotations:
        9898.service.kuma.io/protocol: "http"
    canary:
      annotations:
        9898.service.kuma.io/protocol: "http"
    primary:
      annotations:
        9898.service.kuma.io/protocol: "http"
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeight: 10
    metrics:
      - name: request-success-rate
        threshold: 99
        interval: 1m
      - name: request-duration
        threshold: 500
        interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
artifacts/examples/osm-canary-steps.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: osm
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 600
  service:
    port: 9898
    targetPort: 9898
  analysis:
    interval: 15s
    threshold: 10
    stepWeights: [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
    metrics:
      - name: request-success-rate
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        thresholdRange:
          max: 500
        interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
artifacts/examples/osm-canary.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: osm
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 600
  service:
    port: 9898
    targetPort: 9898
  analysis:
    interval: 15s
    threshold: 10
    maxWeight: 50
    stepWeight: 5
    metrics:
      - name: request-success-rate
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        thresholdRange:
          max: 500
        interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
(Flagger RBAC manifests)
@@ -6,7 +6,7 @@ metadata:
  labels:
    app: flagger
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flagger
@@ -187,12 +187,38 @@ rules:
      - update
      - patch
      - delete
  - apiGroups:
      - kuma.io
    resources:
      - trafficroutes
      - trafficroutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - httproutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - nonResourceURLs:
      - /version
    verbs:
      - get
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger
(Canary CRD schema)
@@ -495,6 +495,40 @@ spec:
                type: array
                items:
                  type: string
              gatewayRefs:
                description: The list of parent Gateways for a HTTPRoute
                maxItems: 32
                type: array
                items:
                  required:
                    - name
                  type: object
                  properties:
                    group:
                      default: gateway.networking.k8s.io
                      maxLength: 253
                      pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
                      type: string
                    kind:
                      default: Gateway
                      maxLength: 63
                      minLength: 1
                      pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
                      type: string
                    name:
                      maxLength: 253
                      minLength: 1
                      type: string
                    namespace:
                      maxLength: 63
                      minLength: 1
                      pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
                      type: string
                    sectionName:
                      maxLength: 253
                      minLength: 1
                      pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
                      type: string
              corsPolicy:
                description: Istio Cross-Origin Resource Sharing policy (CORS)
                type: object
@@ -826,6 +860,12 @@ spec:
              mirrorWeight:
                description: Weight of traffic to be mirrored
                type: number
              primaryReadyThreshold:
                description: Percentage of pods that need to be available to consider primary as ready
                type: number
              canaryReadyThreshold:
                description: Percentage of pods that need to be available to consider canary as ready
                type: number
              match:
                description: A/B testing match conditions
                type: array
@@ -1104,8 +1144,11 @@ spec:
                      - prometheus
                      - influxdb
                      - datadog
                      - stackdriver
                      - cloudwatch
                      - newrelic
                      - graphite
                      - dynatrace
                    address:
                      description: API address of this provider
                      type: string
@@ -1121,6 +1164,9 @@ spec:
                    region:
                      description: Region of the provider
                      type: string
                    insecureSkipVerify:
                      description: Disable SSL certificate validation for the provider address
                      type: boolean
                  query:
                    description: Query of this metric template
                    type: string
@@ -1187,6 +1233,7 @@ spec:
                      - msteams
                      - discord
                      - rocket
                      - gchat
                    channel:
                      description: Alert channel for this provider
                      type: string
(Flagger deployment manifest)
@@ -22,7 +22,7 @@ spec:
      serviceAccountName: flagger
      containers:
        - name: flagger
-          image: ghcr.io/fluxcd/flagger:1.9.0
+          image: ghcr.io/fluxcd/flagger:1.19.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 1.9.0
-appVersion: 1.9.0
+version: 1.19.0
+appVersion: 1.19.0
 kubeVersion: ">=1.16.0-0"
 engine: gotpl
 description: Flagger is a progressive delivery operator for Kubernetes
@@ -22,5 +22,7 @@ keywords:
   - contour
   - nginx
   - traefik
+  - osm
+  - smi
   - gitops
   - canary

@@ -7,7 +7,7 @@ Flagger can run automated application analysis, testing, promotion and rollback
 * A/B Testing (HTTP headers and cookies traffic routing)
 * Blue/Green (traffic switching and mirroring)
 
-Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh) and with Kubernetes ingress controllers
+Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh, Open Service Mesh) and with Kubernetes ingress controllers
 (NGINX, Skipper, Gloo, Contour, Traefik).
 Flagger can be configured to send alerts to various chat platforms such as Slack, Microsoft Teams, Discord and Rocket.
@@ -59,6 +59,25 @@ $ helm upgrade -i flagger flagger/flagger \
 --set metricsServer=http://appmesh-prometheus:9090
 ```
 
+To install Flagger for **Open Service Mesh** (requires OSM to have been installed with Prometheus):
+
+```console
+$ helm upgrade -i flagger flagger/flagger \
+--namespace=osm-system \
+--set meshProvider=osm \
+--set metricsServer=http://osm-prometheus.osm-system.svc:7070
+```
+
+To install Flagger for **Kuma Service Mesh** (requires Kuma to have been installed with Prometheus):
+
+```console
+$ helm upgrade -i flagger flagger/flagger \
+--namespace=kuma-system \
+--set meshProvider=kuma \
+--set metricsServer=http://prometheus-server.kuma-metrics:80
+```
+
 To install Flagger and Prometheus for **NGINX** Ingress (requires controller metrics enabled):
 
 ```console
@@ -112,53 +131,54 @@ The command removes all the Kubernetes components associated with the chart and
 
 The following table lists the configurable parameters of the Flagger chart and their default values.
 
-Parameter | Description | Default
---- | --- | ---
-`image.repository` | Image repository | `ghcr.io/fluxcd/flagger`
-`image.tag` | Image tag | `<VERSION>`
-`image.pullPolicy` | Image pull policy | `IfNotPresent`
-`logLevel` | Log level | `info`
-`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
-`prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the cluster | `false`
-`prometheus.retention` | Prometheus data retention | `2h`
-`selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
-`configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true`
-`eventWebhook` | If set, Flagger will publish events to the given webhook | None
-`slack.url` | Slack incoming webhook | None
-`slack.proxyUrl` | Slack proxy url | None
-`slack.channel` | Slack channel | None
-`slack.user` | Slack username | `flagger`
-`msteams.url` | Microsoft Teams incoming webhook | None
-`msteams.proxyUrl` | Microsoft Teams proxy url | None
-`podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
-`podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace
-`podMonitor.interval` | Interval at which metrics should be scraped | `15s`
-`podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}`
-`leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false`
-`leaderElection.replicaCount` | Number of replicas | `1`
-`serviceAccount.create` | If `true`, Flagger will create service account | `true`
-`serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""`
-`serviceAccount.annotations` | Annotations for service account | `{}`
-`ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io`
-`includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""`
-`rbac.create` | If `true`, create and use RBAC resources | `true`
-`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
-`crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false`
-`resources.requests/cpu` | Pod CPU request | `10m`
-`resources.requests/memory` | Pod memory request | `32Mi`
-`resources.limits/cpu` | Pod CPU limit | `1000m`
-`resources.limits/memory` | Pod memory limit | `512Mi`
-`affinity` | Node/pod affinities | None
-`nodeSelector` | Node labels for pod assignment | `{}`
-`threadiness` | Number of controller workers | `2`
-`tolerations` | List of node taints to tolerate | `[]`
-`istio.kubeconfig.secretName` | The name of the Kubernetes secret containing the Istio shared control plane kubeconfig | None
-`istio.kubeconfig.key` | The name of Kubernetes secret data key that contains the Istio control plane kubeconfig | `kubeconfig`
-`ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None
-`ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None
-`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
-`podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false`
-`podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1`
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger` |
+| `image.tag` | Image tag | `<VERSION>` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `logLevel` | Log level | `info` |
+| `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090` |
+| `prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the cluster | `false` |
+| `prometheus.retention` | Prometheus data retention | `2h` |
+| `selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name` |
+| `configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true` |
+| `eventWebhook` | If set, Flagger will publish events to the given webhook | None |
+| `slack.url` | Slack incoming webhook | None |
+| `slack.proxyUrl` | Slack proxy url | None |
+| `slack.channel` | Slack channel | None |
+| `slack.user` | Slack username | `flagger` |
+| `msteams.url` | Microsoft Teams incoming webhook | None |
+| `msteams.proxyUrl` | Microsoft Teams proxy url | None |
+| `clusterName` | When specified, Flagger will add the cluster name to alerts | `""` |
+| `podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false` |
+| `podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace |
+| `podMonitor.interval` | Interval at which metrics should be scraped | `15s` |
+| `podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}` |
+| `leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false` |
+| `leaderElection.replicaCount` | Number of replicas | `1` |
+| `serviceAccount.create` | If `true`, Flagger will create service account | `true` |
+| `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` |
+| `serviceAccount.annotations` | Annotations for service account | `{}` |
+| `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` |
+| `includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""` |
+| `rbac.create` | If `true`, create and use RBAC resources | `true` |
+| `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` |
+| `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` |
+| `resources.requests/cpu` | Pod CPU request | `10m` |
+| `resources.requests/memory` | Pod memory request | `32Mi` |
+| `resources.limits/cpu` | Pod CPU limit | `1000m` |
+| `resources.limits/memory` | Pod memory limit | `512Mi` |
+| `affinity` | Node/pod affinities | None |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `threadiness` | Number of controller workers | `2` |
+| `tolerations` | List of node taints to tolerate | `[]` |
+| `istio.kubeconfig.secretName` | The name of the Kubernetes secret containing the Istio shared control plane kubeconfig | None |
+| `istio.kubeconfig.key` | The name of Kubernetes secret data key that contains the Istio control plane kubeconfig | `kubeconfig` |
+| `ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None |
+| `ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None |
+| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
+| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
+| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
 
 Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example:
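A representative invocation might look like this (the Slack webhook URL and channel here are placeholders, not values from the chart):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR-WEBHOOK-ID \
--set slack.channel=general
```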
@@ -495,6 +495,40 @@ spec:
                 type: array
                 items:
                   type: string
+              gatewayRefs:
+                description: The list of parent Gateways for a HTTPRoute
+                maxItems: 32
+                type: array
+                items:
+                  required:
+                    - name
+                  type: object
+                  properties:
+                    group:
+                      default: gateway.networking.k8s.io
+                      maxLength: 253
+                      pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+                      type: string
+                    kind:
+                      default: Gateway
+                      maxLength: 63
+                      minLength: 1
+                      pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
+                      type: string
+                    name:
+                      maxLength: 253
+                      minLength: 1
+                      type: string
+                    namespace:
+                      maxLength: 63
+                      minLength: 1
+                      pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+                      type: string
+                    sectionName:
+                      maxLength: 253
+                      minLength: 1
+                      pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+                      type: string
               corsPolicy:
                 description: Istio Cross-Origin Resource Sharing policy (CORS)
                 type: object
@@ -826,6 +860,12 @@ spec:
               mirrorWeight:
                 description: Weight of traffic to be mirrored
                 type: number
+              primaryReadyThreshold:
+                description: Percentage of pods that need to be available to consider primary as ready
+                type: number
+              canaryReadyThreshold:
+                description: Percentage of pods that need to be available to consider canary as ready
+                type: number
               match:
                 description: A/B testing match conditions
                 type: array
@@ -1104,8 +1144,11 @@ spec:
                   - prometheus
                   - influxdb
                   - datadog
                   - stackdriver
                   - cloudwatch
                   - newrelic
+                  - graphite
+                  - dynatrace
               address:
                 description: API address of this provider
                 type: string
@@ -1121,6 +1164,9 @@ spec:
               region:
                 description: Region of the provider
                 type: string
+              insecureSkipVerify:
+                description: Disable SSL certificate validation for the provider address
+                type: boolean
               query:
                 description: Query of this metric template
                 type: string
@@ -1187,6 +1233,7 @@ spec:
                   - msteams
                   - discord
                   - rocket
+                  - gchat
               channel:
                 description: Alert channel for this provider
                 type: string

@@ -22,6 +22,11 @@ spec:
       labels:
         app.kubernetes.io/name: {{ template "flagger.name" . }}
         app.kubernetes.io/instance: {{ .Release.Name }}
+        {{- if .Values.podLabels }}
+        {{- range $key, $value := .Values.podLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
+        {{- end }}
       annotations:
         {{- if .Values.podAnnotations }}
         {{ toYaml .Values.podAnnotations | indent 8 }}
@@ -133,6 +138,9 @@ spec:
             {{- if .Values.threadiness }}
            - -threadiness={{ .Values.threadiness }}
            {{- end }}
+           {{- if .Values.clusterName }}
+           - -cluster-name={{ .Values.clusterName }}
+           {{- end }}
           livenessProbe:
             exec:
               command:

@@ -1,5 +1,5 @@
 {{- if .Values.prometheus.install }}
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: {{ template "flagger.fullname" . }}-prometheus
@@ -24,7 +24,7 @@ rules:
 - nonResourceURLs: ["/metrics"]
   verbs: ["get"]
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: {{ template "flagger.fullname" . }}-prometheus

@@ -1,5 +1,5 @@
 {{- if .Values.rbac.create }}
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: {{ template "flagger.fullname" . }}
@@ -195,12 +195,38 @@ rules:
       - update
      - patch
      - delete
+  - apiGroups:
+      - kuma.io
+    resources:
+      - trafficroutes
+      - trafficroutes/finalizers
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - gateway.networking.k8s.io
+    resources:
+      - httproutes
+      - httproutes/finalizers
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
   - nonResourceURLs:
       - /version
     verbs:
      - get
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: {{ template "flagger.fullname" . }}

@@ -2,7 +2,7 @@
 
 image:
   repository: ghcr.io/fluxcd/flagger
-  tag: 1.9.0
+  tag: 1.19.0
   pullPolicy: IfNotPresent
   pullSecret:
 
@@ -19,7 +19,7 @@ podPriorityClassName: ""
 
 metricsServer: "http://prometheus:9090"
 
-# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik
+# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik, osm
 meshProvider: ""
 
 # single namespace restriction
@@ -50,6 +50,9 @@ securityContext:
 # when specified, flagger will publish events to the provided webhook
 eventWebhook: ""
 
+# when specified, flagger will add the cluster name to alerts
+clusterName: ""
+
 slack:
   user: flagger
   channel:
@@ -135,7 +138,7 @@ tolerations: []
 prometheus:
   # to be used with ingress controllers
   install: false
-  image: docker.io/prom/prometheus:v2.23.0
+  image: docker.io/prom/prometheus:v2.33.5
   pullSecret:
   retention: 2h
   # when enabled, it will add a security context for the prometheus pod
@@ -160,3 +163,5 @@ istio:
 podDisruptionBudget:
   enabled: false
   minAvailable: 1
+
+podLabels: {}

@@ -1,6 +1,6 @@
 apiVersion: v1
 name: grafana
-version: 1.5.0
+version: 1.6.0
 appVersion: 7.2.0
 description: Grafana dashboards for monitoring Flagger canary deployments
 icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png

@@ -1146,7 +1146,6 @@
     "list": [
       {
-        "allValue": null,
         "current": null,
         "datasource": "prometheus",
         "definition": "query_result(sum(envoy_cluster_upstream_rq) by (kubernetes_namespace))",
         "hide": 0,
@@ -1168,7 +1167,6 @@
       },
       {
-        "allValue": null,
         "current": null,
         "datasource": "prometheus",
         "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))",
         "hide": 0,
@@ -1190,7 +1188,6 @@
       },
       {
-        "allValue": null,
         "current": null,
         "datasource": "prometheus",
         "definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))",
         "hide": 0,

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: loadtester
-version: 0.19.0
-appVersion: 0.18.0
+version: 0.22.0
+appVersion: 0.22.0
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
@@ -19,5 +19,7 @@ keywords:
   - appmesh
   - linkerd
   - gloo
+  - osm
+  - smi
   - gitops
   - load testing

@@ -26,7 +26,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester
 The command deploys loadtester on the Kubernetes cluster in the default namespace.
 
 > **Tip**: Note that the namespace where you deploy the load tester should
-> have the Istio, App Mesh or Linkerd sidecar injection enabled
+> have the Istio, App Mesh, Linkerd or Open Service Mesh sidecar injection enabled
 
 The [configuration](#configuration) section lists the parameters that can be configured during installation.
 
@@ -59,6 +59,7 @@ Parameter | Description | Default
 `service.type` | Type of service | `ClusterIP`
 `service.port` | ClusterIP port | `80`
 `cmd.timeout` | Command execution timeout | `1h`
+`cmd.namespaceRegexp` | Restrict access to canaries in matching namespaces | ""
 `logLevel` | Log level can be debug, info, warning, error or panic | `info`
 `appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false`
 `appmesh.backends` | AWS App Mesh virtual services | `none`
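As a sketch, the new `cmd.namespaceRegexp` parameter from the table above could be set at install time to confine the load tester to a set of namespaces (the regular expression below is a placeholder):

```console
$ helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.namespaceRegexp='^test-.*'
```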
@@ -17,8 +17,12 @@ spec:
       labels:
         app: {{ include "loadtester.name" . }}
         app.kubernetes.io/name: {{ include "loadtester.name" . }}
+        {{- range $key, $value := .Values.podLabels }}
+        {{ $key }}: {{ $value | quote }}
+        {{- end }}
       annotations:
         appmesh.k8s.aws/ports: "444"
+        openservicemesh.io/inbound-port-exclusion-list: "80, 8080"
         {{- if .Values.podAnnotations }}
         {{ toYaml .Values.podAnnotations | indent 8 }}
         {{- end }}
@@ -47,6 +51,7 @@ spec:
             - -port=8080
            - -log-level={{ .Values.logLevel }}
            - -timeout={{ .Values.cmd.timeout }}
+           - -namespace-regexp={{ .Values.cmd.namespaceRegexp }}
          livenessProbe:
            exec:
              command:
@@ -73,6 +78,18 @@ spec:
           {{- end }}
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
+          {{- with .Values.volumeMounts }}
+          volumeMounts:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
       {{- if .Values.image.pullSecret }}
       imagePullSecrets:
         - name: {{ .Values.image.pullSecret }}
       {{- end }}
+      {{ with .Values.volumes }}
+      volumes:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}

@@ -2,8 +2,11 @@ replicaCount: 1
 
 image:
   repository: ghcr.io/fluxcd/flagger-loadtester
-  tag: 0.18.0
+  tag: 0.22.0
   pullPolicy: IfNotPresent
   pullSecret:
 
+podLabels: {}
+
 podAnnotations:
   prometheus.io/scrape: "true"
@@ -14,6 +17,7 @@ podPriorityClassName: ""
 logLevel: info
 cmd:
   timeout: 1h
+  namespaceRegexp: ""
 
 nameOverride: ""
 fullnameOverride: ""
@@ -29,6 +33,9 @@ resources:
     cpu: 10m
     memory: 64Mi
 
+volumes: []
+volumeMounts: []
+
 nodeSelector: {}
 
 tolerations: []

@@ -1,6 +1,6 @@
 apiVersion: v1
-version: 5.0.0
-appVersion: 5.0.0
+version: 6.0.0
+appVersion: 6.0.0
 name: podinfo
 engine: gotpl
 description: Flagger canary deployment demo application

@@ -12,6 +12,7 @@ metadata:
     sidecar.istio.io/inject: "false"
     linkerd.io/inject: disabled
     appmesh.k8s.aws/sidecarInjectorWebhook: disabled
+    openservicemesh.io/sidecar-injection: disabled
 spec:
   containers:
     - name: tools

@@ -1,7 +1,7 @@
 # Default values for podinfo.
 image:
   repository: ghcr.io/stefanprodan/podinfo
-  tag: 5.0.0
+  tag: 6.0.0
   pullPolicy: IfNotPresent
 
 podAnnotations: {}

@@ -83,6 +83,7 @@ var (
 	enableConfigTracking  bool
 	ver                   bool
 	kubeconfigServiceMesh string
+	clusterName           string
 )
 
 func init() {
@@ -106,7 +107,7 @@ func init() {
 	flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
 	flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
 	flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
-	flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper or traefik.")
+	flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper, traefik, osm or kuma.")
 	flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
 	flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for NGINX ingresses.")
 	flag.StringVar(&ingressClass, "ingress-class", "", "Ingress class used for annotating HTTPProxy objects.")
@@ -115,6 +116,7 @@ func init() {
 	flag.BoolVar(&enableConfigTracking, "enable-config-tracking", true, "Enable secrets and configmaps tracking.")
 	flag.BoolVar(&ver, "version", false, "Print version")
 	flag.StringVar(&kubeconfigServiceMesh, "kubeconfig-service-mesh", "", "Path to a kubeconfig for the service mesh control plane cluster.")
+	flag.StringVar(&clusterName, "cluster-name", "", "Cluster name to be included in alert msgs.")
 }
 
 func main() {
@@ -238,6 +240,7 @@ func main() {
 		meshProvider,
 		version.VERSION,
 		fromEnv("EVENT_WEBHOOK_URL", eventWebhook),
+		clusterName,
 	)
 
 	// leader election context
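Assuming the binary is run directly, the new flag added above could be combined with the existing ones like this (the flag values are illustrative only):

```bash
flagger \
  -mesh-provider=istio \
  -cluster-name=prod-eu-1 \
  -selector-labels=app,name,app.kubernetes.io/name
```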
@@ -1,5 +1,5 @@
 /*
-Copyright 2020 The Flux authors
+Copyright 2020, 2022 The Flux authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package main
 import (
 	"flag"
 	"log"
+	"regexp"
 	"time"
 
 	"github.com/fluxcd/flagger/pkg/loadtester"
@@ -27,11 +28,12 @@ import (
 	"go.uber.org/zap"
 )
 
-var VERSION = "0.18.0"
+var VERSION = "0.22.0"
 var (
 	logLevel          string
 	port              string
 	timeout           time.Duration
+	namespaceRegexp   string
 	zapReplaceGlobals bool
 	zapEncoding       string
 )
@@ -40,6 +42,7 @@ func init() {
 	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
 	flag.StringVar(&port, "port", "9090", "Port to listen on.")
 	flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
+	flag.StringVar(&namespaceRegexp, "namespace-regexp", "", "Restrict access to canaries in matching namespaces.")
 	flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
 	flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
 }
@@ -66,5 +69,12 @@ func main() {
 	logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
 
 	gateStorage := loadtester.NewGateStorage("in-memory")
-	loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
+
+	var namespaceRegexpCompiled *regexp.Regexp
+	if namespaceRegexp != "" {
+		namespaceRegexpCompiled = regexp.MustCompile(namespaceRegexp)
+	}
+	authorizer := loadtester.NewAuthorizer(namespaceRegexpCompiled)
+
+	loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, authorizer, stopCh)
 }

BIN  docs/diagrams/flagger-gatewayapi-canary.png (new file; binary file not shown; 39 KiB)
BIN  docs/diagrams/flagger-kuma-canary.png (new file; binary file not shown; 121 KiB)
BIN  docs/diagrams/flagger-osm-traffic-split.png (new file; binary file not shown; 124 KiB)
BIN  (modified image, file name not shown; binary file not shown; before 48 KiB, after 51 KiB)

@@ -10,9 +10,9 @@ version in production by gradually shifting traffic to the new version while mea
 and running conformance tests.
 
 Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
-using a service mesh (App Mesh, Istio, Linkerd)
+using a service mesh (App Mesh, Istio, Linkerd, Open Service Mesh)
 or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
-For release analysis, Flagger can query Prometheus, Datadog, New Relic or CloudWatch
+For release analysis, Flagger can query Prometheus, Datadog, New Relic, CloudWatch or Graphite
 and for alerting it uses Slack, MS Teams, Discord and Rocket.
 
 
@@ -26,16 +26,18 @@ and part of [Flux](https://fluxcd.io) family of GitOps tools.
 
 ## Getting started
 
-To get started with Flagger, chose one of the supported routing providers and
+To get started with Flagger, choose one of the supported routing providers and
 [install](install/flagger-install-on-kubernetes.md) Flagger with Helm or Kustomize.
 
-After install Flagger, you can follow one of these tutorials to get started:
+After installing Flagger, you can follow one of these tutorials to get started:
 
 **Service mesh tutorials**
 
 * [Istio](tutorials/istio-progressive-delivery.md)
 * [Linkerd](tutorials/linkerd-progressive-delivery.md)
 * [AWS App Mesh](tutorials/appmesh-progressive-delivery.md)
+* [Open Service Mesh](tutorials/osm-progressive-delivery.md)
+* [Kuma](tutorials/kuma-progressive-delivery.md)
 
 **Ingress controller tutorials**

@@ -30,6 +30,9 @@
 * [NGINX Canary Deployments](tutorials/nginx-progressive-delivery.md)
 * [Skipper Canary Deployments](tutorials/skipper-progressive-delivery.md)
 * [Traefik Canary Deployments](tutorials/traefik-progressive-delivery.md)
+* [Open Service Mesh Deployments](tutorials/osm-progressive-delivery.md)
+* [Kuma Canary Deployments](tutorials/kuma-progressive-delivery.md)
+* [Gateway API Canary Deployments](tutorials/gatewayapi-progressive-delivery.md)
 * [Blue/Green Deployments](tutorials/kubernetes-blue-green.md)
 * [Canary analysis with Prometheus Operator](tutorials/prometheus-operator.md)
 * [Zero downtime deployments](tutorials/zero-downtime-deployments.md)

@@ -8,17 +8,17 @@ Flagger is written in Go and uses Go modules for dependency management.
 
 On your dev machine install the following tools:
 
-* go >= 1.14
-* git >= 2.20
-* bash >= 5.0
-* make >= 3.81
-* kubectl >= 1.16
-* kustomize >= 3.5
-* helm >= 3.0
-* docker >= 19.03
+* go >= 1.17
+* git >= 2.20
+* bash >= 5.0
+* make >= 3.81
+* kubectl >= 1.22
+* kustomize >= 4.4
+* helm >= 3.0
+* docker >= 19.03
 
 You'll also need a Kubernetes cluster for testing Flagger.
-You can use Minikube, Kind, Docker desktop or any remote cluster (AKS/EKS/GKE/etc) Kubernetes version 1.16 or newer.
+You can use Minikube, Kind, Docker desktop or any remote cluster (AKS/EKS/GKE/etc) Kubernetes version 1.22 or newer.
 
 To start contributing to Flagger, fork the [repository](https://github.com/fluxcd/flagger) on GitHub.
 
@@ -100,6 +100,8 @@ make codegen
 Run code formatters:
 
 ```bash
+go install golang.org/x/tools/cmd/goimports@latest
+
 make fmt
 ```
 
@@ -200,7 +202,7 @@ Run the Istio e2e tests:
 ./test/istio/run.sh
 ```
 
-For each service mesh and ingress controller there is a dedicated e2e test suite,
-chose one that matches your changes from this [list](https://github.com/fluxcd/flagger/tree/main/test).
+For each service mesh and ingress controller, there is a dedicated e2e test suite,
+choose one that matches your changes from this [list](https://github.com/fluxcd/flagger/tree/main/test).
 
 When you open a pull request on Flagger repo, the unit and integration tests will be run in CI.

@@ -13,8 +13,8 @@ Flagger implements the following deployment strategies:
 
 #### When should I use A/B testing instead of progressive traffic shifting?
 
-For frontend applications that require session affinity you should use HTTP headers or
-cookies match conditions to ensure a set of users will stay on the same version for
+For frontend applications that require session affinity, you should use HTTP headers or
+cookie match conditions to ensure a set of users will stay on the same version for
 the whole duration of the canary analysis.
 
 #### Can I use Flagger to manage applications that live outside of a service mesh?
 
@@ -49,16 +49,34 @@ spec:
     timestamp: "2020-03-10T14:24:48+0000"
 ```
 
-#### Why is there a downtime during the canary initializing process when analysis is disabled?
+#### How to change replicas for a deployment when not using HPA?
 
-It is the intended behavior when the analysis is disabled, this allows instant rollback and also mimics the way a Kubernetes deployment initialization works.
-To avoid this: enable the analysis (`skipAnalysis: false`), wait for the initialization to finish, and disable it afterward (`skipAnalysis: true`).
+To change replicas for a deployment when not using HPA, you have to update the canary deployment with the desired replica count
+and trigger an analysis by annotating the template. After the analysis finishes, Flagger will promote the `spec.replicas` changes to the primary deployment.
+
+Example:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+spec:
+  replicas: 4 #update replicas
+  template:
+    metadata:
+      annotations:
+        timestamp: "2022-02-10T14:24:48+0000" #add annotation to trigger analysis
+```
+#### Why is there a window of downtime during the canary initializing process when analysis is disabled?
+
+A window of downtime is the intended behavior when the analysis is disabled. This allows instant rollback and also mimics the way
+a Kubernetes deployment initialization works. To avoid this, enable the analysis (`skipAnalysis: false`), wait for the initialization
+to finish, and disable it afterward (`skipAnalysis: true`).
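As a minimal sketch of that sequence, assuming a canary named `podinfo` (the field toggled is `spec.skipAnalysis`):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # step 1: keep the analysis enabled so the primary can initialize without downtime
  skipAnalysis: false
  # step 2: once initialization has finished, flip this to true
  # to skip the analysis and restore instant promotion/rollback
```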

 ## Kubernetes services
 
 #### How is an application exposed inside the cluster?
 
-Assuming the app name is podinfo you can define a canary like:
+Assuming the app name is `podinfo`, you can define a canary like:
 
 ```yaml
 apiVersion: flagger.app/v1beta1
@@ -84,19 +102,19 @@ spec:
 
 If the `service.name` is not specified, then `targetRef.name` is used for
 the apex domain and canary/primary services name prefix.
-You should treat the service name as an immutable field, changing it could result in routing conflicts.
+You should treat the service name as an immutable field; changing it could result in routing conflicts.
 
 Based on the canary spec service, Flagger generates the following Kubernetes ClusterIP service:
 
-* `<service.name>.<namespace>.svc.cluster.local`
+* `<service.name>.<namespace>.svc.cluster.local`
 
   selector `app=<name>-primary`
 
-* `<service.name>-primary.<namespace>.svc.cluster.local`
+* `<service.name>-primary.<namespace>.svc.cluster.local`
 
   selector `app=<name>-primary`
 
-* `<service.name>-canary.<namespace>.svc.cluster.local`
+* `<service.name>-canary.<namespace>.svc.cluster.local`
 
   selector `app=<name>`
 
@@ -152,7 +170,7 @@ and can be used for conformance testing or load testing.
 
 ## Multiple ports
 
-#### My application listens on multiple ports, how can I expose them inside the cluster?
+#### My application listens on multiple ports. How can I expose them inside the cluster?
 
 If port discovery is enabled, Flagger scans the deployment spec and extracts the container ports, excluding
 the port specified in the canary service and Envoy sidecar ports.
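As a sketch, port discovery is turned on through the canary service spec (the port number here is illustrative):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
spec:
  service:
    # port exposed by the generated ClusterIP services
    port: 9898
    # scan the deployment and expose the remaining container ports
    portDiscovery: true
```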

@@ -219,8 +237,15 @@ spec:
     app: podinfo
 ```
 
-Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
-If you use a different convention you can specify your label with the `-selector-labels` flag.
+Besides `app`, Flagger supports `name` and `app.kubernetes.io/name` selectors.
+If you use a different convention, you can specify your label with the `-selector-labels` flag.
+For example:
+
+```
+flagger \
+-selector-labels=service,name,app.kubernetes.io/name \
+...
+```
 
 #### Are pod affinity and anti-affinity supported?
 
@@ -331,7 +356,7 @@ spec:
 
 #### How does Flagger measure the request success rate and duration?
 
-Flagger measures the request success rate and duration using Prometheus queries.
+By default, Flagger measures the request success rate and duration using Prometheus queries.
 
 #### HTTP requests success rate percentage
 
@@ -360,8 +385,8 @@ sum(
         response_code!~"5.*"
       }[$interval]
     )
-  )
-  /
+)
+/
 sum(
   rate(
     istio_requests_total{
@@ -384,8 +409,8 @@ sum(
         envoy_response_code!~"5.*"
       }[$interval]
     )
-  )
-  /
+)
+/
 sum(
   rate(
     envoy_cluster_upstream_rq{
@@ -435,10 +460,10 @@ Spec:
 Istio query:
 
 ```javascript
-histogram_quantile(0.99,
+histogram_quantile(0.99,
   sum(
     irate(
-      istio_request_duration_seconds_bucket{
+      istio_request_duration_milliseconds_bucket{
         reporter="destination",
         destination_workload=~"$workload",
         destination_workload_namespace=~"$namespace"
@@ -451,7 +476,7 @@ histogram_quantile(0.99,
 Envoy query (App Mesh, Contour and Gloo):
 
 ```javascript
-histogram_quantile(0.99,
+histogram_quantile(0.99,
   sum(
     irate(
       envoy_cluster_upstream_rq_time_bucket{
@@ -467,8 +492,35 @@ histogram_quantile(0.99,
 
 #### Can I use custom metrics?
 
-The analysis can be extended with metrics provided by Prometheus, Datadog and AWS CloudWatch.
-For more details on how custom metrics can be used please read the [metrics docs](usage/metrics.md).
+The analysis can be extended with metrics provided by Prometheus, Datadog, AWS CloudWatch, New Relic and Graphite.
+For more details on how custom metrics can be used, please read the [metrics docs](usage/metrics.md).
+
+#### Istio Gateway API
+
+If you're using Istio with Gateway API, the Prometheus query needs to include `reporter="source"`. For example, to calculate HTTP requests error percentage, the query would be:
+
+```javascript
+100 - sum(
+  rate(
+    istio_requests_total{
+      reporter="source",
+      destination_workload_namespace=~"$namespace",
+      destination_workload=~"$workload",
+      response_code!~"5.*"
+    }[$interval]
+  )
+)
+/
+sum(
+  rate(
+    istio_requests_total{
+      reporter="source",
+      destination_workload_namespace=~"$namespace",
+      destination_workload=~"$workload"
+    }[$interval]
+  )
+) * 100
+```
 
 ## Istio routing
 
@@ -711,7 +763,7 @@ spec:
       weight: 0
 ```
 
-Therefore, The following virtual service forward the traffic to `/podinfo` by the above delegate VirtualService.
+Therefore, the following virtual service forwards the traffic to `/podinfo` by the above delegate VirtualService.
 
 ```yaml
 apiVersion: networking.istio.io/v1alpha3
@@ -737,7 +789,7 @@ spec:
     namespace: test
 ```
 
-Note that pilot env `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
+Note that pilot env `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
 For the use of Istio Delegation, you can refer to the documentation of
 [Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/#Delegate)
 and [pilot environment variables](https://istio.io/latest/docs/reference/commands/pilot-discovery/#envvars).
@@ -746,8 +798,8 @@ and [pilot environment variables](https://istio.io/latest/docs/reference/command
 
 #### How can I expose multiple canaries on the same external domain?
 
-Assuming you have two apps, one that servers the main website and one that serves the REST API.
-For each app you can define a canary object as:
+Assuming you have two apps -- one that serves the main website and one that serves its REST API --
+you can define a canary object for each app as:
 
 ```yaml
 apiVersion: flagger.app/v1beta1
@@ -791,7 +843,7 @@ Istio Pilot will
 [merge](https://istio.io/help/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host)
 the two services and the website rule will be moved to the end of the list in the merged configuration.
 
-Note that host merging only works if the canaries are bounded to a ingress gateway other than the `mesh` gateway.
+Note that host merging only works if the canaries are bound to an ingress gateway other than the `mesh` gateway.
 
 ## Istio Mutual TLS
 
@@ -809,7 +861,7 @@ spec:
       mode: ISTIO_MUTUAL
 ```
 
-If you run Istio in permissive mode you can disable TLS:
+If you run Istio in permissive mode, you can disable TLS:
 
 ```yaml
 apiVersion: flagger.app/v1beta1
@@ -850,3 +902,42 @@ spec:
     mtls:
       mode: DISABLE
 ```
+
+## ExternalDNS
+
+### Can I use annotations?
+
+Flagger propagates annotations (and labels) to all the generated apex,
+primary and canary objects. This allows using external-dns annotations.
+
+You can configure Flagger to set annotations with:
+
+```yaml
+spec:
+  service:
+    apex:
+      annotations:
+        external-dns.alpha.kubernetes.io/hostname: "mydomain.com"
+    primary:
+      annotations:
+        external-dns.alpha.kubernetes.io/hostname: "primary.mydomain.com"
+    canary:
+      annotations:
+        external-dns.alpha.kubernetes.io/hostname: "canary.mydomain.com"
+```
+
+### Multiple sources and Istio
+
+**/!\\** The apex annotations are added to both the generated Kubernetes Services and the generated Istio
+VirtualServices objects. If you have configured external-dns to use both sources,
+this will create conflicts!
+
+```yaml
+spec:
+  containers:
+    args:
+      - --source=service # choose only one
+      - --source=istio-virtualservice # of these two
+```
+
+[Check out the ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/istio.md)

@@ -71,6 +71,16 @@ helm upgrade -i flagger flagger/flagger \
 --set metricsServer=http://appmesh-prometheus:9090
 ```
 
+Deploy Flagger for **Open Service Mesh (OSM)** (requires OSM to have been installed with Prometheus):
+
+```console
+$ helm upgrade -i flagger flagger/flagger \
+--namespace=osm-system \
+--set crd.create=false \
+--set meshProvider=osm \
+--set metricsServer=http://osm-prometheus.osm-system.svc:7070
+```
+
 You can install Flagger in any namespace as long as it can talk to the Prometheus service on port 9090.
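For example, a hypothetical install into a custom namespace only needs the Prometheus address wired in (namespace and URL below are placeholders):

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=flagger-system \
--set metricsServer=http://prometheus.monitoring:9090
```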

 For ingress controllers, the install instructions are:
 
@@ -173,6 +183,14 @@ kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=main | k
 
 This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
 
+Install Flagger for Open Service Mesh:
+
+```bash
+kustomize build https://github.com/fluxcd/flagger/kustomize/osm?ref=main | kubectl apply -f -
+```
+
+This deploys Flagger in the `osm-system` namespace and sets the metrics server URL to OSM's Prometheus instance.
+
 If you want to install a specific Flagger release, add the version number to the URL:
 
 ```bash
@@ -202,7 +220,7 @@ metadata:
   name: app
   namespace: test
 spec:
-  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik
+  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik, osm
   # use the kubernetes provider for Blue/Green style deployments
   provider: nginx
 ```

@@ -242,7 +242,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
 ```
 
 Flagger detects that the deployment revision changed and starts a new rollout:
@@ -307,7 +307,7 @@ Trigger a canary deployment:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
 ```
 
 Exec into the load tester pod with:
@@ -399,7 +399,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
 ```
 
 Flagger detects that the deployment revision changed and starts the A/B test:

@@ -320,7 +320,7 @@ After a couple of seconds Flux will apply the Kubernetes resources from Git and
 A CI/CD pipeline for the `frontend` release could look like this:
 
 * cut a release from the master branch of the podinfo code repo with the git tag `3.1.1`
-* CI builds the image and pushes the `podinfo:3.1.1` image to the container registry
+* CI builds the image and pushes the `podinfo:6.0.1` image to the container registry
 * Flux scans the registry and updates the Helm release `image.tag` to `3.1.1`
 * Flux commits and pushes the change to the cluster repo
 * Flux applies the updated Helm release on the cluster
@@ -337,7 +337,7 @@ A canary deployment can fail due to any of the following reasons:
 
 * the container image can't be downloaded
 * the deployment replica set is stuck for more than ten minutes \(eg. due to a container crash loop\)
-* the webooks \(acceptance tests, helm tests, load tests, etc\) are returning a non 2xx response
+* the webhooks \(acceptance tests, helm tests, load tests, etc\) are returning a non 2xx response
 * the HTTP success rate \(non 5xx responses\) metric drops under the threshold
 * the HTTP average duration metric goes over the threshold
 * the Istio telemetry service is unable to collect traffic metrics

@@ -224,7 +224,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
 ```
 
 Flagger detects that the deployment revision changed and starts a new rollout:
@@ -281,7 +281,7 @@ Trigger a canary deployment:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
 ```
 
 Exec into the load tester pod with:
@@ -369,7 +369,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
 ```
 
 Flagger detects that the deployment revision changed and starts the A/B test:

docs/gitbook/tutorials/gatewayapi-progressive-delivery.md (new file, 484 lines)
@@ -0,0 +1,484 @@
# Gateway API Canary Deployments

This guide shows you how to use Gateway API and Flagger to automate canary deployments.

![Flagger Gateway API Canary](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-gatewayapi-canary.png)

## Prerequisites

Flagger requires a Kubernetes cluster **v1.16** or newer and any mesh/ingress that implements the `v1alpha2` of Gateway API. We'll be using Contour for the sake of this tutorial, but you can use any other implementation.

Install the GatewayAPI CRDs:

```bash
kubectl apply -k github.com/kubernetes-sigs/gateway-api/config/crd?ref=v0.4.1
```

Install a cluster-wide GatewayClass, a Gateway belonging to the GatewayClass, and the Contour components in the `projectcontour` namespace:

```bash
kubectl apply -f https://raw.githubusercontent.com/projectcontour/contour/release-1.20/examples/render/contour.yaml
```

Install Flagger in the `flagger-system` namespace:

```bash
kubectl apply -k github.com/fluxcd/flagger//kustomize/gatewayapi
```

## Bootstrap

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services, HTTPRoutes for the Gateway\). These objects expose the application inside the mesh and drive the canary analysis and promotion.

Create a test namespace:

```bash
kubectl create ns test
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```

Create metric templates targeting the Prometheus server in the `flagger-system` namespace. The PromQL queries below are meant for `Envoy`, but you can [change it to your ingress/mesh provider](https://docs.flagger.app/faq#metrics) accordingly.

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: latency
  namespace: flagger-system
spec:
  provider:
    type: prometheus
    address: http://flagger-prometheus:9090
  query: |
    histogram_quantile(0.99,
      sum(
        rate(
          envoy_cluster_upstream_rq_time_bucket{
            envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
          }[{{ interval }}]
        )
      ) by (le)
    )/1000
---
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: error-rate
  namespace: flagger-system
spec:
  provider:
    type: prometheus
    address: http://flagger-prometheus:9090
  query: |
    100 - sum(
      rate(
        envoy_cluster_upstream_rq{
          envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
          envoy_response_code!~"5.*"
        }[{{ interval }}]
      )
    )
    /
    sum(
      rate(
        envoy_cluster_upstream_rq{
          envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
        }[{{ interval }}]
      )
    )
    * 100
```

Save the above resource as metric-templates.yaml and then apply it:

```bash
kubectl apply -f metric-templates.yaml
```
Create a canary custom resource \(replace "localproject.contour.io" with your own domain\):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # service port number
    port: 9898
    # container port number or name (optional)
    targetPort: 9898
    # Gateway API HTTPRoute host names
    hosts:
      - localproject.contour.io
    # Reference to the Gateway that the generated HTTPRoute would attach to.
    gatewayRefs:
      - name: contour
        namespace: projectcontour
  analysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 10
    metrics:
      - name: error-rate
        # max error rate (5xx responses)
        # percentage (0-100)
        templateRef:
          name: error-rate
          namespace: flagger-system
        thresholdRange:
          max: 1
        interval: 1m
      - name: latency
        templateRef:
          name: latency
          namespace: flagger-system
        # seconds
        thresholdRange:
          max: 0.5
        interval: 30s
    # testing (optional)
    webhooks:
      - name: smoke-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'anon' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy.projectcontour/"
```

Save the above resource as podinfo-canary.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
httproutes.gateway.networking.k8s.io/podinfo
```

## Expose the app outside the cluster

Find the external address of Contour's Envoy load balancer:

```bash
export ADDRESS="$(kubectl -n projectcontour get svc/envoy -ojson \
| jq -r ".status.loadBalancer.ingress[].hostname")"
echo $ADDRESS
```

Configure your DNS server with a CNAME record \(AWS\) or A record \(GKE/AKS/DOKS\) and point a domain e.g. `localproject.contour.io` to the LB address.

Now you can access the podinfo UI using your domain address.

Note that you should be using HTTPS when exposing production workloads on the internet. You can obtain free TLS certs from Let's Encrypt, read this [guide](https://github.com/stefanprodan/eks-contour-ingress) on how to configure cert-manager to secure Contour with TLS certificates.

If you're using a local cluster via kind/k3s you can port forward the Envoy LoadBalancer service:

```bash
kubectl port-forward -n projectcontour svc/envoy 8080:80
```

Now you can access podinfo via `curl -H "Host: localproject.contour.io" localhost:8080`

## Automated canary promotion

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

A canary deployment is triggered by changes in any of the following objects:

* Deployment PodSpec \(container image, command, ports, env, resources, etc; see the sketch after this list\)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables
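
For instance, a PodSpec-only change such as setting an environment variable is enough to start a new analysis; a minimal sketch (the variable name is arbitrary and only serves to change the PodSpec):

```bash
# any PodSpec change triggers a new canary analysis, not just image updates
kubectl -n test set env deployment/podinfo DEMO_TRIGGER=rollout-1
```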

You can monitor how Flagger progressively changes the weights of the HTTPRoute object that is attached to the Gateway with:

```bash
watch kubectl get httproute -n test podinfo -o=jsonpath='{.spec.rules}'
```
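
The rules are printed as a JSON list of backend references with their current weights. While the analysis is progressing, the output will look roughly like the snippet below; the weights shown are illustrative and the exact fields depend on your Gateway API version:

```bash
# example output while the canary is receiving 10% of traffic (illustrative)
[{"backendRefs":[{"kind":"Service","name":"podinfo-primary","port":9898,"weight":90},
                 {"kind":"Service","name":"podinfo-canary","port":9898,"weight":10}]}]
```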

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2022-01-16T14:05:07Z
prod        frontend  Succeeded     0        2022-01-15T16:15:07Z
prod        backend   Failed        0        2022-01-14T17:05:07Z
```

## Automated rollback

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:6.0.2
```

Exec into the load tester pod with:

```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```

Generate HTTP 500 errors:

```bash
watch curl http://podinfo-canary:9898/status/500
```

Generate latency:

```bash
watch curl http://podinfo-canary:9898/delay/1
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
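
While the errors are being injected you can watch the failure counter climb; a small sketch using the status fields exposed by the Canary resource (`phase` and `failedChecks`):

```bash
# print the canary phase and the number of failed checks every two seconds
watch kubectl -n test get canary podinfo \
  -o jsonpath='{.status.phase}{"\t"}{.status.failedChecks}{"\n"}'
```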

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement error rate 69.17% > 1%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement error rate 61.39% > 1%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement error rate 55.06% > 1%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement error rate 47.00% > 1%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement error rate 38.08% > 1%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

# A/B Testing

Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions. In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users. This is particularly useful for frontend applications that require session affinity.

Create a canary custom resource \(replace "localproject.contour.io" with your own domain\):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # service port number
    port: 9898
    # container port number or name (optional)
    targetPort: 9898
    # Gateway API HTTPRoute host names
    hosts:
      - localproject.contour.io
    # Reference to the Gateway that the generated HTTPRoute would attach to.
    gatewayRefs:
      - name: contour
        namespace: projectcontour
  analysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 10
    metrics:
      - name: error-rate
        # max error rate (5xx responses)
        # percentage (0-100)
        templateRef:
          name: error-rate
          namespace: flagger-system
        thresholdRange:
          max: 1
        interval: 1m
      - name: latency
        templateRef:
          name: latency
          namespace: flagger-system
        # seconds
        thresholdRange:
          max: 0.5
        interval: 30s
    # testing (optional)
    webhooks:
      - name: smoke-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: bash
          cmd: "curl -sd 'anon' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io -H 'X-Canary: insider' http://envoy.projectcontour/"
```
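
Note that the analysis above is still expressed with `maxWeight`/`stepWeight`, while the events further down report iterations; for a header-based A/B test, Flagger's analysis is normally expressed with `iterations` plus `match` conditions instead of weights. A sketch of what that stanza could look like, with the header taken from the hey command above (the `iterations: 10` value is an assumption matching the ten-minute analysis described below):

```yaml
  analysis:
    interval: 1m
    threshold: 5
    # total number of iterations used for A/B testing
    iterations: 10
    # canary match conditions: only requests with this header are routed to the canary
    match:
      - headers:
          x-canary:
            exact: "insider"
```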

The above configuration will run an analysis for ten minutes targeting users that send the `X-Canary: insider` header \(the load test command above sets it\).

Save the above resource as podinfo-ab-canary.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-ab-canary.yaml
```

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:6.0.3
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 1/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 2/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 3/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 4/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 5/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 6/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 7/10
  Normal   Synced  55s   flagger  Advance podinfo.test canary iteration 8/10
  Normal   Synced  45s   flagger  Advance podinfo.test canary iteration 9/10
  Normal   Synced  35s   flagger  Advance podinfo.test canary iteration 10/10
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

The above procedures can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
@@ -160,6 +160,8 @@ spec:
          cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy.gloo-system"
```

+*Note: when using upstreamRef the following fields are copied over from the original upstream: `Labels, SslConfig, CircuitBreakers, ConnectionConfig, UseHttp2, InitialStreamWindowSize`*
+
Save the above resource as podinfo-canary.yaml and then apply it:

```bash
@@ -207,7 +209,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -262,7 +264,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Generate HTTP 500 errors:

@@ -363,7 +365,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
```

Generate 404s:

@@ -425,7 +427,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.4
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
```

Flagger detects that the deployment revision changed and starts the A/B test:

@@ -173,7 +173,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -13,6 +13,7 @@ Install Istio with telemetry support and Prometheus:

```bash
istioctl manifest install --set profile=default

+# Suggestion: change release-1.8 in the command below to your real Istio version.
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.8/samples/addons/prometheus.yaml
```

@@ -185,7 +186,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -245,7 +246,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Exec into the load tester pod with:

@@ -171,7 +171,7 @@ Trigger a deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -311,7 +311,7 @@ Trigger a deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
```

Generate 404s:

docs/gitbook/tutorials/kuma-progressive-delivery.md (new file, 252 lines)
@@ -0,0 +1,252 @@
# Kuma Canary Deployments

This guide shows you how to use Kuma and Flagger to automate canary deployments.

## Prerequisites

Flagger requires a Kubernetes cluster **v1.16** or newer and Kuma **1.3** or newer.

Install Kuma and Prometheus (part of Kuma Metrics):

```bash
kumactl install control-plane | kubectl apply -f -
kumactl install metrics | kubectl apply -f -
```

Install Flagger in the `kuma-system` namespace:

```bash
kubectl apply -k github.com/fluxcd/flagger//kustomize/kuma
```

## Bootstrap

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Kuma `TrafficRoute`).
These objects expose the application inside the mesh and drive the canary analysis and promotion.

Create a test namespace and enable Kuma sidecar injection:

```bash
kubectl create ns test
kubectl annotate namespace test kuma.io/sidecar-injection=enabled
```

Install the load testing service to generate traffic during the canary analysis:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```

Create a canary custom resource for the `podinfo` deployment:

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
  annotations:
    kuma.io/mesh: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    targetPort: 9898
    apex:
      annotations:
        9898.service.kuma.io/protocol: "http"
    canary:
      annotations:
        9898.service.kuma.io/protocol: "http"
    primary:
      annotations:
        9898.service.kuma.io/protocol: "http"
  analysis:
    # schedule interval (default 60s)
    interval: 30s
    # max number of failed metric checks before rollback
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    metrics:
      - name: request-success-rate
        threshold: 99
        interval: 1m
      - name: request-duration
        threshold: 500
        interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
```

Save the above resource as `podinfo-canary.yaml` and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```

When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute (with `stepWeight: 5` and `maxWeight: 50`, that is ten 30s iterations).

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficroutes.kuma.io/podinfo
```

After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to directly target the canary pods.
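
To check the canary endpoint from inside the mesh, you can exec a curl from the load tester pod; a minimal sketch (the `flagger-loadtester` deployment name comes from the tester install above):

```bash
kubectl -n test exec -it deploy/flagger-loadtester -- \
  curl -s http://podinfo-canary.test:9898/
```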

## Automated canary promotion

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  New revision detected! Scaling up podinfo.test
  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Pre-rollout check acceptance-test passed
  Advance podinfo.test canary weight 5
  Advance podinfo.test canary weight 10
  Advance podinfo.test canary weight 15
  Advance podinfo.test canary weight 20
  Advance podinfo.test canary weight 25
  Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
  Advance podinfo.test canary weight 30
  Advance podinfo.test canary weight 35
  Advance podinfo.test canary weight 40
  Advance podinfo.test canary weight 45
  Advance podinfo.test canary weight 50
  Copying podinfo.test template spec to podinfo-primary.test
  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.

A canary deployment is triggered by changes in any of the following objects:

* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2019-06-30T14:05:07Z
prod        frontend  Succeeded     0        2019-06-30T16:15:07Z
prod        backend   Failed        0        2019-06-30T17:05:07Z
```

## Automated rollback

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulty version.

Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Exec into the load tester pod with:

```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```

Generate HTTP 500 errors:

```bash
watch -n 1 curl http://podinfo-canary.test:9898/status/500
```

Generate latency:

```bash
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Starting canary analysis for podinfo.test
  Pre-rollout check acceptance-test passed
  Advance podinfo.test canary weight 5
  Advance podinfo.test canary weight 10
  Advance podinfo.test canary weight 15
  Halt podinfo.test advancement success rate 69.17% < 99%
  Halt podinfo.test advancement success rate 61.39% < 99%
  Halt podinfo.test advancement success rate 55.06% < 99%
  Halt podinfo.test advancement request duration 1.20s > 0.5s
  Halt podinfo.test advancement request duration 1.45s > 0.5s
  Rolling back podinfo.test failed checks threshold reached 5
  Canary failed! Scaling down podinfo.test
```

The above procedures can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.

@@ -152,7 +152,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -211,7 +211,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Exec into the load tester pod with:

@@ -297,7 +297,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
```

Generate 404s:

@@ -442,7 +442,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.4
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
```

Flagger detects that the deployment revision changed and starts the A/B testing:

@@ -6,7 +6,7 @@ This guide shows you how to use the NGINX ingress controller and Flagger to auto

## Prerequisites

-Flagger requires a Kubernetes cluster **v1.16** or newer and NGINX ingress **v0.41** or newer.
+Flagger requires a Kubernetes cluster **v1.19** or newer and NGINX ingress **v1.0.2** or newer.

Install the NGINX ingress controller with Helm v3:

@@ -59,7 +59,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
Create an ingress definition (replace `app.example.com` with your own domain):

```yaml
-apiVersion: networking.k8s.io/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: podinfo
@@ -70,12 +70,16 @@ metadata:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
-    - host: app.example.com
+    - host: "app.example.com"
      http:
        paths:
-          - backend:
-              serviceName: podinfo
-              servicePort: 80
+          - pathType: Prefix
+            path: "/"
+            backend:
+              service:
+                name: podinfo
+                port:
+                  number: 80
```

Save the above resource as podinfo-ingress.yaml and then apply it:

@@ -101,7 +105,7 @@ spec:
    name: podinfo
  # ingress reference
  ingressRef:
-    apiVersion: networking.k8s.io/v1beta1
+    apiVersion: networking.k8s.io/v1
    kind: Ingress
    name: podinfo
  # HPA reference (optional)

@@ -188,7 +192,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.1
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -242,7 +246,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.2
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Generate HTTP 500 errors:

@@ -330,7 +334,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.3
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
```

Generate high response latency:

@@ -403,7 +407,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
-podinfod=stefanprodan/podinfo:3.1.4
+podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
```

Flagger detects that the deployment revision changed and starts the A/B testing:

docs/gitbook/tutorials/osm-progressive-delivery.md (new file, 363 lines)
@@ -0,0 +1,363 @@
# Open Service Mesh Canary Deployments

This guide shows you how to use Open Service Mesh (OSM) and Flagger to automate canary deployments.

## Prerequisites

Flagger requires a Kubernetes cluster **v1.16** or newer and Open Service Mesh **0.9.1** or newer.

OSM must have permissive traffic policy enabled and have an instance of Prometheus for metrics.

- If the OSM CLI is being used for installation, install OSM using the following command:
  ```bash
  osm install \
    --set=OpenServiceMesh.deployPrometheus=true \
    --set=OpenServiceMesh.enablePermissiveTrafficPolicy=true
  ```
- If a managed instance of OSM is being used:
  - [Bring your own instance](https://docs.openservicemesh.io/docs/guides/observability/metrics/#byo-prometheus) of Prometheus,
    setting the namespace to match the managed OSM controller namespace
  - Enable permissive traffic policy after installation by updating the OSM MeshConfig resource:
    ```bash
    # Replace <osm-namespace> with OSM controller's namespace
    kubectl patch meshconfig osm-mesh-config -n <osm-namespace> -p '{"spec":{"traffic":{"enablePermissiveTrafficPolicyMode":true}}}' --type=merge
    ```

To install Flagger in the default `osm-system` namespace, use:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/osm?ref=main
```

Alternatively, if a non-default namespace or managed instance of OSM is in use, install Flagger with Helm, replacing the <osm-namespace>
values as appropriate. If a custom instance of Prometheus is being used, replace `osm-prometheus` with the relevant Prometheus service name.

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=<osm-namespace> \
--set meshProvider=osm \
--set metricsServer=http://osm-prometheus.<osm-namespace>.svc:7070
```

## Bootstrap

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and SMI traffic split).
These objects expose the application inside the mesh and drive the canary analysis and promotion.

Create a `test` namespace and enable OSM namespace monitoring and metrics scraping for the namespace:

```bash
kubectl create namespace test
osm namespace add test
osm metrics enable --namespace test
```

Create a `podinfo` deployment and a horizontal pod autoscaler:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```

Install the load testing service to generate traffic during the canary analysis:

```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```

Create a canary custom resource for the `podinfo` deployment.
The following `podinfo` canary custom resource instructs Flagger to:
1. monitor any changes to the `podinfo` deployment created earlier,
2. detect `podinfo` deployment revision changes, and
3. start a Flagger canary analysis, rollout, and promotion if there were deployment revision changes.

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: osm
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta2
    kind: HorizontalPodAutoscaler
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  service:
    # ClusterIP port number
    port: 9898
    # container port number or name (optional)
    targetPort: 9898
  analysis:
    # schedule interval (default 60s)
    interval: 30s
    # max number of failed metric checks before rollback
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # OSM Prometheus checks
    metrics:
      - name: request-success-rate
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        # maximum req duration P99
        # milliseconds
        thresholdRange:
          max: 500
        interval: 30s
    # testing (optional)
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
```

Save the above resource as podinfo-canary.yaml and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```

When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```

After the bootstrap, the `podinfo` deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods.
During the canary analysis, the `podinfo-canary.test` address can be used to directly target the canary pods.

## Automated Canary Promotion

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted.

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  New revision detected! Scaling up podinfo.test
  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Pre-rollout check acceptance-test passed
  Advance podinfo.test canary weight 5
  Advance podinfo.test canary weight 10
  Advance podinfo.test canary weight 15
  Advance podinfo.test canary weight 20
  Advance podinfo.test canary weight 25
  Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
  Advance podinfo.test canary weight 30
  Advance podinfo.test canary weight 35
  Advance podinfo.test canary weight 40
  Advance podinfo.test canary weight 45
  Advance podinfo.test canary weight 50
  Copying podinfo.test template spec to podinfo-primary.test
  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply any new changes to the `podinfo` deployment during the canary analysis, Flagger will restart the analysis.

A canary deployment is triggered by changes in any of the following objects:

* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   15       2019-06-30T14:05:07Z
prod        frontend  Succeeded     0        2019-06-30T16:15:07Z
prod        backend   Failed        0        2019-06-30T17:05:07Z
```

## Automated Rollback

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulty version.

Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
```

Exec into the load tester pod with:

```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```

Repeatedly generate HTTP 500 errors until the `kubectl describe` output below shows canary rollout failure:

```bash
watch -n 0.1 curl http://podinfo-canary.test:9898/status/500
```

Repeatedly generate latency until the canary rollout fails:

```bash
watch -n 0.1 curl http://podinfo-canary.test:9898/delay/1
```

When the number of failed checks reaches the canary analysis thresholds defined in the `podinfo` canary custom resource earlier, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Starting canary analysis for podinfo.test
  Pre-rollout check acceptance-test passed
  Advance podinfo.test canary weight 5
  Advance podinfo.test canary weight 10
  Advance podinfo.test canary weight 15
  Halt podinfo.test advancement success rate 69.17% < 99%
  Halt podinfo.test advancement success rate 61.39% < 99%
  Halt podinfo.test advancement success rate 55.06% < 99%
  Halt podinfo.test advancement request duration 1.20s > 0.5s
  Halt podinfo.test advancement request duration 1.45s > 0.5s
  Rolling back podinfo.test failed checks threshold reached 5
  Canary failed! Scaling down podinfo.test
```

## Custom Metrics

The canary analysis can be extended with Prometheus queries.

Let's define a check for 404 not found errors.
Edit the canary analysis (`podinfo-canary.yaml` file) and add the following metric.
For more information on creating additional custom metrics using OSM metrics, please check the [metrics available in OSM](https://docs.openservicemesh.io/docs/guides/observability/metrics/#available-metrics).

```yaml
  analysis:
    metrics:
      - name: "404s percentage"
        threshold: 3
        query: |
          100 - (
            sum(
              rate(
                osm_request_total{
                  destination_namespace="test",
                  destination_kind="Deployment",
                  destination_name="podinfo",
                  response_code!="404"
                }[1m]
              )
            )
            /
            sum(
              rate(
                osm_request_total{
                  destination_namespace="test",
                  destination_kind="Deployment",
                  destination_name="podinfo"
                }[1m]
              )
            ) * 100
          )
```

The above configuration validates the canary version by checking if the HTTP 404 req/sec percentage is below three percent of the total traffic.
If the 404s rate reaches the 3% threshold, then the analysis is aborted and the canary is marked as failed.
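
If you want to sanity-check the query by hand before adding it to the analysis, you can port-forward the OSM Prometheus and run it against the HTTP API; a sketch, assuming the `osm-prometheus` service name used in the install step above:

```bash
# expose the OSM Prometheus locally
kubectl -n osm-system port-forward svc/osm-prometheus 7070:7070 &

# run the 404s rate query directly (URL-encoded via --data-urlencode)
curl -sG http://localhost:7070/api/v1/query \
  --data-urlencode 'query=sum(rate(osm_request_total{destination_namespace="test", destination_name="podinfo", response_code="404"}[1m]))'
```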

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
```

Exec into the load tester pod with:

```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```

Repeatedly generate 404s until the canary rollout fails:

```bash
watch -n 0.1 curl http://podinfo-canary.test:9898/status/404
```

Watch the Flagger logs to confirm a successful canary rollback:

```text
kubectl -n osm-system logs deployment/flagger -f | jq .msg

Starting canary deployment for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Halt podinfo.test advancement 404s percentage 6.20 > 3
Halt podinfo.test advancement 404s percentage 6.45 > 3
Halt podinfo.test advancement 404s percentage 7.22 > 3
Halt podinfo.test advancement 404s percentage 6.50 > 3
Halt podinfo.test advancement 404s percentage 6.34 > 3
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```

@@ -6,7 +6,7 @@ This guide shows you how to use the [Skipper ingress controller](https://opensou

## Prerequisites

-Flagger requires a Kubernetes cluster **v1.16** or newer and Skipper ingress **0.11.40** or newer.
+Flagger requires a Kubernetes cluster **v1.19** or newer and Skipper ingress **v0.13** or newer.

Install Skipper ingress-controller using [upstream definition](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/#install-skipper-as-ingress-controller).

@@ -36,7 +36,9 @@ kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes | kubectl

## Bootstrap

-Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and canary ingress\). These objects expose the application outside the cluster and drive the canary analysis and promotion.
+Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
+then creates a series of objects (Kubernetes deployments, ClusterIP services and canary ingress).
+These objects expose the application outside the cluster and drive the canary analysis and promotion.

Create a test namespace:

@@ -60,7 +62,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
Create an ingress definition \(replace `app.example.com` with your own domain\):

```yaml
-apiVersion: networking.k8s.io/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: podinfo
@@ -71,12 +73,16 @@ metadata:
    kubernetes.io/ingress.class: "skipper"
spec:
  rules:
-    - host: app.example.com
+    - host: "app.example.com"
      http:
        paths:
-          - backend:
-              serviceName: podinfo
-              servicePort: 80
+          - pathType: Prefix
+            path: "/"
+            backend:
+              service:
+                name: podinfo
+                port:
+                  number: 80
```

Save the above resource as podinfo-ingress.yaml and then apply it:

@@ -85,7 +91,7 @@ Save the above resource as podinfo-ingress.yaml and then apply it:
kubectl apply -f ./podinfo-ingress.yaml
```

-Create a canary custom resource \(replace `app.example.com` with your own domain\):
+Create a canary custom resource (replace `app.example.com` with your own domain):

```yaml
apiVersion: flagger.app/v1beta1
@@ -102,7 +108,7 @@ spec:
    name: podinfo
  # ingress reference
  ingressRef:
-    apiVersion: networking.k8s.io/v1beta1
+    apiVersion: networking.k8s.io/v1
    kind: Ingress
    name: podinfo
  # HPA reference (optional)
@@ -190,7 +196,9 @@ ingress.networking.k8s.io/podinfo-canary

## Automated canary promotion

-Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
+Flagger implements a control loop that gradually shifts traffic to the canary while measuring
+key performance indicators like HTTP requests success rate, requests average duration and pod health.
+Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.

@@ -271,7 +279,8 @@ Generate latency:
watch -n 1 curl http://app.example.com/delay/1
```

-When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
+When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
+the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n flagger-system logs deploy/flagger -f | jq .msg
@@ -333,7 +342,8 @@ Edit the canary analysis and add the latency check:
    interval: 1m
```

-The threshold is set to 500ms so if the average request duration in the last minute goes over half a second then the analysis will fail and the canary will not be promoted.
+The threshold is set to 500ms so if the average request duration in the last minute goes over half a second
+then the analysis will fail and the canary will not be promoted.

Trigger a canary deployment by updating the container image:

@@ -367,4 +377,3 @@ Canary failed! Scaling down podinfo.test
```

If you have alerting configured, Flagger will send a notification with the reason why the canary failed.

@@ -13,9 +13,17 @@ Install Traefik with Helm v3:

```bash
helm repo add traefik https://helm.traefik.io/traefik
kubectl create ns traefik
-helm upgrade -i traefik traefik/traefik \
-  --namespace traefik \
-  --set additionalArguments="{--metrics.prometheus=true}"
+
+cat <<EOF | helm upgrade -i traefik traefik/traefik --namespace traefik -f -
+deployment:
+  podAnnotations:
+    prometheus.io/port: "9100"
+    prometheus.io/scrape: "true"
+    prometheus.io/path: "/metrics"
+metrics:
+  prometheus:
+    entryPoint: metrics
+EOF
```

Install Flagger and the Prometheus add-on in the same namespace as Traefik:
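
A hedged sketch of that install, assuming the Flagger Helm chart's `prometheus.install` and `meshProvider` options (check the chart for the exact flags in your version):

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace traefik \
  --set prometheus.install=true \
  --set meshProvider=traefik
```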

@@ -20,9 +20,10 @@ Once the webhook has been generated, Flagger can be configured to send Slack not

```bash
helm upgrade -i flagger flagger/flagger \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
---set slack.proxy-url=my-http-proxy.com \ # optional http/s proxy
+--set slack.proxy=my-http-proxy.com \ # optional http/s proxy
--set slack.channel=general \
---set slack.user=flagger
+--set slack.user=flagger \
+--set clusterName=my-cluster
```

Once configured with a Slack incoming **webhook**,

@@ -127,6 +128,9 @@ Alert fields:
When the severity is set to `warn`, Flagger will alert when waiting on manual confirmation or if the analysis fails.
When the severity is set to `error`, Flagger will alert only if the canary analysis fails.

+To differentiate alerts based on the cluster name, you can configure Flagger with the `-cluster-name=my-cluster`
+command flag, or with Helm `--set clusterName=my-cluster`.
+
## Prometheus Alert Manager

You can use Alertmanager to trigger alerts when a canary deployment fails:
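
A hedged sketch of such a rule, using Flagger's `flagger_canary_status` Prometheus metric (the status-to-number mapping should be verified against your Flagger version):

```yaml
  - alert: canary_rollback
    expr: flagger_canary_status > 1
    for: 1m
    labels:
      severity: warning
    annotations:
      summary: "Canary failed"
      description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```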

@@ -3,11 +3,11 @@

Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:

* **Canary Release** \(progressive traffic shifting\)
-  * Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik
+  * Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik, Open Service Mesh, Kuma, Gateway API
* **A/B Testing** \(HTTP headers and cookies traffic routing\)
-  * Istio, App Mesh, NGINX, Contour, Gloo Edge
+  * Istio, App Mesh, NGINX, Contour, Gloo Edge, Gateway API
* **Blue/Green** \(traffic switching\)
-  * Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge
+  * Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge, Open Service Mesh, Gateway API
* **Blue/Green Mirroring** \(traffic shadowing\)
  * Istio

@@ -326,7 +326,7 @@ Blue/Green rollout steps for service mesh:
* run conformance tests for the canary pods
* run load tests and metric checks for the canary pods every minute
* abort the canary release if the failure threshold is reached
-* route traffic to canary
+* route traffic to canary (this doesn't happen when using the kubernetes provider)
* promote canary spec over primary (blue)
* wait for primary rollout
* route traffic to primary

@@ -115,6 +115,10 @@ but disabling config-tracking using the per Secret/ConfigMap annotation may fit
The autoscaler reference is optional; when specified,
Flagger will pause the traffic increase while the target and primary deployments are scaled up or down.
HPA can help reduce the resource usage during the canary analysis.
+When the autoscaler reference is specified, any changes made to the autoscaler are only made active
+in the primary autoscaler when a rollout for the deployment starts and completes successfully.
+Optionally, you can create two HPAs, one for the canary and one for the primary, to update the HPA without
+doing a new rollout. As the canary deployment will be scaled to 0, the HPA on the canary will be inactive.

The progress deadline represents the maximum time in seconds for the canary deployment to
make progress before it is rolled back; it defaults to ten minutes.

@@ -184,6 +188,10 @@ spec:
      test: "test"
```

+Note that the `apex` annotations are added to both the generated Kubernetes Service and the
+generated service mesh/ingress object. This allows using external-dns with Istio `VirtualServices`
+and `TraefikServices`. Beware of configuration conflicts [here](../faq.md#ExternalDNS).

Besides port mapping and metadata, the service specification can
contain URI match and rewrite rules, timeout and retry policies:

@@ -331,6 +339,14 @@ Spec:
    # total number of iterations
    # used for A/B Testing and Blue/Green
    iterations:
+    # threshold of primary pods that need to be available to consider it ready
+    # before starting rollout. this is optional and the default is 100
+    # percentage (0-100)
+    primaryReadyThreshold: 100
+    # threshold of canary pods that need to be available to consider it ready
+    # before starting rollout. this is optional and the default is 100
+    # percentage (0-100)
+    canaryReadyThreshold: 100
    # canary match conditions
    # used for A/B Testing
    match:

@@ -46,8 +46,9 @@ metadata:
  name: my-metric
spec:
  provider:
-    type: # can be prometheus or datadog
+    type: # can be prometheus, datadog, etc
    address: # API URL
+    insecureSkipVerify: # if set to true, disables the TLS cert validation
    secretRef:
      name: # name of the secret containing the API credentials
  query: # metric query

@@ -401,3 +402,210 @@ Reference the template in the canary analysis:
      max: 5
    interval: 1m
```

## Graphite

You can create custom metric checks using the Graphite provider.

Graphite template example:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: graphite-request-success-rate
spec:
  provider:
    type: graphite
    address: http://graphite.monitoring
  query: |
    target=summarize(
      asPercent(
        sumSeries(
          stats.timers.httpServerRequests.app.{{target}}.exception.*.method.*.outcome.{CLIENT_ERROR,INFORMATIONAL,REDIRECTION,SUCCESS}.status.*.uri.*.count
        ),
        sumSeries(
          stats.timers.httpServerRequests.app.{{target}}.exception.*.method.*.outcome.*.status.*.uri.*.count
        )
      ),
      {{interval}},
      'avg'
    )
```

Reference the template in the canary analysis:

```yaml
  analysis:
    metrics:
      - name: "success rate"
        templateRef:
          name: graphite-request-success-rate
        thresholdRange:
          min: 90
        interval: 1m
```

## Graphite authentication

If your Graphite API requires basic authentication, you can create a secret in the same namespace
as the `MetricTemplate` with the basic-auth credentials:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: graphite-basic-auth
  namespace: flagger
stringData:
  username: your-user
  password: your-password
```

Then, reference the secret in the `MetricTemplate`:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: my-metric
  namespace: flagger
spec:
  provider:
    type: graphite
    address: http://graphite.monitoring
    secretRef:
      name: graphite-basic-auth
```

## Google Cloud Monitoring (Stackdriver)

Enable Workload Identity on your cluster, create a service account key that has read access to the
Cloud Monitoring API and then create an IAM policy binding between the GCP service account and the Flagger
service account on Kubernetes. You can take a look at this [guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).

Annotate the flagger service account:

```bash
kubectl annotate serviceaccount flagger \
  --namespace <namespace> \
  iam.gke.io/gcp-service-account=<gcp-serviceaccount-name>@<project-id>.iam.gserviceaccount.com
```

Alternatively, you can download the JSON keys and add them to your secret with the key `serviceAccountKey` (this method is not recommended).

Create a secret that contains your project-id (and, if workload identity is not enabled on your cluster,
your [service account json](https://cloud.google.com/docs/authentication/production#create_service_account)).

```bash
kubectl create secret generic gcloud-sa --from-literal=project=<project-id>
```

Then reference the secret in the metric template.
Note: the particular MQL query used here works if [Istio is installed on GKE](https://cloud.google.com/istio/docs/istio-on-gke/installing).

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: bytes-sent
  namespace: test
spec:
  provider:
    type: stackdriver
    secretRef:
      name: gcloud-sa
  query: |
    fetch k8s_container
    | metric 'istio.io/service/server/response_latencies'
    | filter
        (metric.destination_service_name == '{{ service }}-canary'
        && metric.destination_service_namespace == '{{ namespace }}')
    | align delta(1m)
    | every 1m
    | group_by [],
        [value_response_latencies_percentile:
            percentile(value.response_latencies, 99)]
```

The reference for the query language can be found [here](https://cloud.google.com/monitoring/mql/reference).
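
As with the other providers, the template can then be referenced from a canary analysis; a hedged sketch (the 1000ms threshold is an assumption, pick a value that fits your latency budget):

```yaml
  analysis:
    metrics:
      - name: "latency p99"
        templateRef:
          name: bytes-sent
          namespace: test
        thresholdRange:
          max: 1000
        interval: 1m
```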
|
||||
|
||||
## Influxdb
|
||||
|
||||
The influxdb provider uses the [flux](https://docs.influxdata.com/influxdb/v2.0/query-data/get-started/) scripting language.
|
||||
|
||||
Create a secret that contains your authentication token that can be gotthen from the InfluxDB UI.
|
||||
|
||||
```
|
||||
kubectl create secret generic gcloud-sa --from-literal=token=<token>
|
||||
```
|
||||
|
||||
Then reference the secret in the metric template.qq
|
||||
Note: The particular MQL query used here works if [Istio is installed on GKE](https://cloud.google.com/istio/docs/istio-on-gke/installing).
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: not-found
  namespace: test
spec:
  provider:
    type: influxdb
    secretRef:
      name: influx-token
  query: |
    from(bucket: "default")
      |> range(start: -2h)
      |> filter(fn: (r) => r["_measurement"] == "istio_requests_total")
      |> filter(fn: (r) => r["destination_workload_namespace"] == "{{ namespace }}")
      |> filter(fn: (r) => r["destination_workload"] == "{{ target }}")
      |> filter(fn: (r) => r["response_code"] == "500")
      |> count()
      |> yield(name: "count")
```

## Dynatrace

You can create custom metric checks using the Dynatrace provider.

Create a secret with your Dynatrace token:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: dynatrace
  namespace: istio-system
data:
  dynatrace_token: ZHQwYz...
```

Dynatrace metric template example:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: response-time-95pct
  namespace: istio-system
spec:
  provider:
    type: dynatrace
    address: https://xxxxxxxx.live.dynatrace.com
    secretRef:
      name: dynatrace
  query: |
    builtin:service.response.time:filter(eq(dt.entity.service,SERVICE-ABCDEFG0123456789)):percentile(95)
```

Reference the template in the canary analysis:

```yaml
analysis:
  metrics:
    - name: "response-time-95pct"
      templateRef:
        name: response-time-95pct
        namespace: istio-system
      thresholdRange:
        max: 1000
      interval: 1m
```

@@ -143,7 +143,8 @@ helm repo add flagger https://flagger.app

helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.timeout=1h
--set cmd.timeout=1h \
--set cmd.namespaceRegexp=''
```

When deployed, the load tester API will be available at `http://flagger-loadtester.test/`.
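
For context, a rollout webhook that drives this API typically looks like the following sketch; the target service, port, and `hey` command are illustrative:

```yaml
analysis:
  webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        # generate traffic against the canary for the duration of the check
        cmd: "hey -z 1m -q 10 -c 2 http://app-canary.test:9898/"
```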

@@ -253,6 +254,42 @@ to the nGrinder server and start a new performance test. the load tester will pe
poll the nGrinder server for the status of the test,
and prevent duplicate requests from being sent in subsequent analysis loops.

### K6 Load Tester

You can also delegate load testing to a third-party webhook. An example of this is the [`k6 webhook`](https://github.com/grafana/flagger-k6-webhook). This webhook uses [`k6`](https://k6.io/), a feature-rich load tester, to run load or smoke tests on canaries. For all available features, see the source repository.

Here's an example that integrates this webhook as a `pre-rollout` step, to load test a service before any traffic is sent to it:

```yaml
webhooks:
  - name: k6-load-test
    timeout: 5m
    type: pre-rollout
    url: http://k6-loadtester.flagger/launch-test
    metadata:
      script: |
        import http from 'k6/http';
        import { sleep } from 'k6';
        export const options = {
          vus: 2,
          duration: '30s',
          thresholds: {
            http_req_duration: ['p(95)<50']
          },
          ext: {
            loadimpact: {
              name: '<cluster>/<your_service>',
              projectID: <project id>,
            },
          },
        };

        export default function () {
          http.get('http://<your_service>-canary.<namespace>:80/');
          sleep(0.10);
        }
```

## Integration Testing

Flagger comes with a testing service that can run Helm tests, Bats tests or Concord tests when configured as a webhook.
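
As a sketch of how such a test is wired into the analysis (the tester service address, release name, and helm command are illustrative, not prescriptive):

```yaml
analysis:
  webhooks:
    - name: "smoke test"
      type: pre-rollout
      url: http://flagger-helmtester.test/
      timeout: 3m
      metadata:
        type: "helmv3"
        # run the Helm tests of a hypothetical release before promotion
        cmd: "test my-release -n test"
```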

go.mod
@@ -1,21 +1,78 @@
module github.com/fluxcd/flagger

go 1.15
go 1.17

require (
    cloud.google.com/go/monitoring v0.1.0
    github.com/Masterminds/semver/v3 v3.0.3
    github.com/aws/aws-sdk-go v1.37.32
    github.com/davecgh/go-spew v1.1.1
    github.com/go-logr/zapr v0.3.0
    github.com/google/go-cmp v0.5.2
    github.com/prometheus/client_golang v1.9.0
    github.com/go-logr/zapr v1.2.0
    github.com/google/go-cmp v0.5.6
    github.com/googleapis/gax-go/v2 v2.0.5
    github.com/influxdata/influxdb-client-go/v2 v2.5.0
    github.com/prometheus/client_golang v1.11.1
    github.com/stretchr/testify v1.7.0
    go.uber.org/zap v1.14.1
    golang.org/x/tools v0.1.0 // indirect
    go.uber.org/zap v1.19.1
    google.golang.org/api v0.54.0
    google.golang.org/genproto v0.0.0-20210813162853-db860fec028c
    google.golang.org/grpc v1.39.1
    google.golang.org/protobuf v1.27.1
    gopkg.in/h2non/gock.v1 v1.0.15
    k8s.io/api v0.20.4
    k8s.io/apimachinery v0.20.4
    k8s.io/client-go v0.20.4
    k8s.io/code-generator v0.20.4
    k8s.io/klog/v2 v2.4.0
    k8s.io/api v0.23.3
    k8s.io/apimachinery v0.23.3
    k8s.io/client-go v0.23.3
    k8s.io/code-generator v0.23.3
    k8s.io/klog/v2 v2.40.1
)

require (
    cloud.google.com/go v0.92.3 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.1 // indirect
    github.com/deepmap/oapi-codegen v1.8.2 // indirect
    github.com/evanphx/json-patch v4.12.0+incompatible // indirect
    github.com/go-logr/logr v1.2.2 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/gofuzz v1.1.0 // indirect
    github.com/google/uuid v1.1.2 // indirect
    github.com/googleapis/gnostic v0.5.5 // indirect
    github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
    github.com/imdario/mergo v0.3.5 // indirect
    github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.26.0 // indirect
    github.com/prometheus/procfs v0.6.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    go.opencensus.io v0.23.0 // indirect
    go.uber.org/atomic v1.7.0 // indirect
    go.uber.org/multierr v1.6.0 // indirect
    golang.org/x/mod v0.5.1 // indirect
    golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
    golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
    golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
    golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
    golang.org/x/text v0.3.7 // indirect
    golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
    golang.org/x/tools v0.1.9 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
    k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
    k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
    k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
    sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect
)

@@ -7,7 +7,7 @@ set -o pipefail
SCRIPT_ROOT=$(git rev-parse --show-toplevel)

# Grab code-generator version from go.sum.
CODEGEN_VERSION=$(grep 'k8s.io/code-generator' go.sum | awk '{print $2}' | head -1)
CODEGEN_VERSION=$(grep 'k8s.io/code-generator' go.sum | awk '{print $2}' | tail -1 | awk -F '/' '{print $1}')
CODEGEN_PKG=$(echo `go env GOPATH`"/pkg/mod/k8s.io/code-generator@${CODEGEN_VERSION}")

echo ">> Using ${CODEGEN_PKG}"
@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh

${CODEGEN_PKG}/generate-groups.sh all \
  github.com/fluxcd/flagger/pkg/client github.com/fluxcd/flagger/pkg/apis \
  "flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1" \
  "flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1 kuma:v1alpha1 gatewayapi:v1alpha2" \
  --output-base "${TEMP_DIR}" \
  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

@@ -34,6 +34,14 @@ kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=main | k
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to the linkerd-viz extension's Prometheus instance,
which lives in the `linkerd-viz` namespace by default.

Install Flagger for Open Service Mesh:

```bash
kustomize build https://github.com/fluxcd/flagger/kustomize/osm?ref=main | kubectl apply -f -
```

This deploys Flagger in the `osm-system` namespace and sets the metrics server URL to OSM's Prometheus instance.

If you want to install a specific Flagger release, add the version number to the URL:

```bash
@@ -68,7 +76,7 @@ metadata:
name: app
namespace: test
spec:
  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo
  # can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, osm
  # use the kubernetes provider for Blue/Green style deployments
  provider: nginx
```

@@ -13,7 +13,7 @@ spec:
  - -mesh-provider=appmesh
  - -metrics-server=http://appmesh-prometheus:9090
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger

@@ -495,6 +495,40 @@ spec:
  type: array
  items:
    type: string
gatewayRefs:
  description: The list of parent Gateways for a HTTPRoute
  maxItems: 32
  type: array
  items:
    required:
      - name
    type: object
    properties:
      group:
        default: gateway.networking.k8s.io
        maxLength: 253
        pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
        type: string
      kind:
        default: Gateway
        maxLength: 63
        minLength: 1
        pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
        type: string
      name:
        maxLength: 253
        minLength: 1
        type: string
      namespace:
        maxLength: 63
        minLength: 1
        pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
        type: string
      sectionName:
        maxLength: 253
        minLength: 1
        pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
        type: string
corsPolicy:
  description: Istio Cross-Origin Resource Sharing policy (CORS)
  type: object
@@ -826,6 +860,12 @@ spec:
mirrorWeight:
  description: Weight of traffic to be mirrored
  type: number
primaryReadyThreshold:
  description: Percentage of pods that need to be available to consider primary as ready
  type: number
canaryReadyThreshold:
  description: Percentage of pods that need to be available to consider canary as ready
  type: number
match:
  description: A/B testing match conditions
  type: array
@@ -1104,8 +1144,11 @@ spec:
- prometheus
- influxdb
- datadog
- stackdriver
- cloudwatch
- newrelic
- graphite
- dynatrace
address:
  description: API address of this provider
  type: string
@@ -1121,6 +1164,9 @@ spec:
region:
  description: Region of the provider
  type: string
insecureSkipVerify:
  description: Disable SSL certificate validation for the provider address
  type: boolean
query:
  description: Query of this metric template
  type: string
@@ -1187,6 +1233,7 @@ spec:
- msteams
- discord
- rocket
- gchat
channel:
  description: Alert channel for this provider
  type: string

@@ -9,4 +9,4 @@ resources:
images:
  - name: ghcr.io/fluxcd/flagger
    newName: ghcr.io/fluxcd/flagger
    newTag: 1.9.0
    newTag: 1.19.0
@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flagger
@@ -177,12 +177,38 @@ rules:
      - update
      - patch
      - delete
  - apiGroups:
      - kuma.io
    resources:
      - trafficroutes
      - trafficroutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - httproutes
      - httproutes/finalizers
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - nonResourceURLs:
      - /version
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger
@@ -19,7 +19,7 @@ spec:
  serviceAccountName: flagger-prometheus
  containers:
    - name: prometheus
      image: prom/prometheus:v2.23.0
      image: prom/prometheus:v2.33.5
      imagePullPolicy: IfNotPresent
      args:
        - '--storage.tsdb.retention=2h'
@@ -49,4 +49,4 @@ spec:
    configMap:
      name: flagger-prometheus
  - name: data-volume
    emptyDir: {}
    emptyDir: {}
@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: flagger-prometheus
@@ -18,7 +18,7 @@ rules:
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger-prometheus

kustomize/gatewayapi/kustomization.yaml (new file)
@@ -0,0 +1,7 @@
bases:
  - ../base/flagger/
  - ../base/prometheus/
resources:
  - namespace.yaml
patchesStrategicMerge:
  - patch.yaml

kustomize/gatewayapi/namespace.yaml (new file)
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: flagger-system

kustomize/gatewayapi/patch.yaml (new file)
@@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -log-level=debug
            - -include-label-prefix=app.kubernetes.io
            - -mesh-provider=gatewayapi:v1alpha2
            - -metrics-server=http://flagger-prometheus:9090

@@ -13,7 +13,7 @@ spec:
  - -mesh-provider=istio
  - -metrics-server=http://prometheus:9090
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger

kustomize/kuma/kustomization.yaml (new file)
@@ -0,0 +1,5 @@
namespace: kuma-system
bases:
  - ../base/flagger/
patchesStrategicMerge:
  - patch.yaml

kustomize/kuma/patch.yaml (new file)
@@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -log-level=info
            - -include-label-prefix=app.kubernetes.io
            - -mesh-provider=kuma
            - -metrics-server=http://prometheus-server.kuma-metrics:80

@@ -13,7 +13,7 @@ spec:
  - -mesh-provider=linkerd
  - -metrics-server=http://prometheus.linkerd-viz:9090
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger

kustomize/osm/kustomization.yaml (new file)
@@ -0,0 +1,5 @@
namespace: osm-system
bases:
  - ../base/flagger/
patchesStrategicMerge:
  - patch.yaml

kustomize/osm/patch.yaml (new file)
@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -log-level=info
            - -include-label-prefix=app.kubernetes.io
            - -mesh-provider=osm
            - -metrics-server=http://osm-prometheus.osm-system.svc:7070
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: flagger
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flagger
subjects:
  - kind: ServiceAccount
    name: flagger
    namespace: osm-system

@@ -25,7 +25,7 @@ spec:
    spec:
      containers:
        - name: podinfod
          image: stefanprodan/podinfo:3.1.0
          image: ghcr.io/stefanprodan/podinfo:6.0.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
@@ -15,10 +15,11 @@ spec:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
        openservicemesh.io/inbound-port-exclusion-list: "80, 8080"
    spec:
      containers:
        - name: loadtester
          image: ghcr.io/fluxcd/flagger-loadtester:0.18.0
          image: ghcr.io/fluxcd/flagger-loadtester:0.22.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http

@@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

@@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

@@ -20,6 +20,7 @@ import (
    "fmt"
    "time"

    "github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1alpha2"
    istiov1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
@@ -29,6 +30,8 @@ const (
    CanaryKind              = "Canary"
    ProgressDeadlineSeconds = 600
    AnalysisInterval        = 60 * time.Second
    PrimaryReadyThreshold   = 100
    CanaryReadyThreshold    = 100
    MetricInterval          = "1m"
)

@@ -137,7 +140,12 @@ type CanaryService struct {
    // +optional
    Gateways []string `json:"gateways,omitempty"`

    // Hosts attached to the generated Istio virtual service
    // Gateways that the HTTPRoute needs to attach itself to.
    // Must be specified while using the Gateway API as a provider.
    // +optional
    GatewayRefs []v1alpha2.ParentReference `json:"gatewayRefs,omitempty"`

    // Hosts attached to the generated Istio virtual service or Gateway API HTTPRoute.
    // Defaults to the service name
    // +optional
    Hosts []string `json:"hosts,omitempty"`
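
To make the new field concrete, a Canary service section using `gatewayRefs` might look like this minimal sketch; the Gateway name and namespace are placeholders:

```yaml
spec:
  service:
    port: 80
    # parent Gateways the generated HTTPRoute attaches to
    gatewayRefs:
      - name: gateway
        namespace: flagger-system
```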

@@ -229,6 +237,12 @@ type CanaryAnalysis struct {
    // Max number of failed checks before the canary is terminated
    Threshold int `json:"threshold"`

    // Percentage of pods that need to be available to consider primary as ready
    PrimaryReadyThreshold *int `json:"primaryReadyThreshold,omitempty"`

    // Percentage of pods that need to be available to consider canary as ready
    CanaryReadyThreshold *int `json:"canaryReadyThreshold,omitempty"`

    // Alert list for this canary analysis
    Alerts []CanaryAlert `json:"alerts,omitempty"`
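
A minimal sketch of how these optional thresholds could appear in a canary analysis; the values shown are the documented defaults of 100, so omitting them has the same effect:

```yaml
analysis:
  interval: 1m
  threshold: 5
  # percentage of pods that must be available for readiness
  primaryReadyThreshold: 100
  canaryReadyThreshold: 100
```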

@@ -254,7 +268,7 @@ type CanaryMetric struct {
    Interval string `json:"interval,omitempty"`

    // Deprecated: Max value accepted for this metric (replaced by ThresholdRange)
    Threshold float64 `json:"threshold"`
    Threshold float64 `json:"threshold,omitempty"`

    // Range value accepted for this metric
    // +optional
@@ -309,7 +323,7 @@ const (
    RolloutHook HookType = "rollout"
    // PreRolloutHook execute webhook before routing traffic to canary
    PreRolloutHook HookType = "pre-rollout"
    // PreRolloutHook execute webhook after the canary analysis
    // PostRolloutHook execute webhook after the canary analysis
    PostRolloutHook HookType = "post-rollout"
    // ConfirmRolloutHook halt canary analysis until webhook returns HTTP 200
    ConfirmRolloutHook HookType = "confirm-rollout"
@@ -440,6 +454,22 @@ func (c *Canary) GetAnalysisThreshold() int {
    return 1
}

// GetAnalysisPrimaryReadyThreshold returns the canary primaryReadyThreshold (default 100)
func (c *Canary) GetAnalysisPrimaryReadyThreshold() int {
    if c.GetAnalysis().PrimaryReadyThreshold != nil {
        return *c.GetAnalysis().PrimaryReadyThreshold
    }
    return PrimaryReadyThreshold
}

// GetAnalysisCanaryReadyThreshold returns the canary canaryReadyThreshold (default 100)
func (c *Canary) GetAnalysisCanaryReadyThreshold() int {
    if c.GetAnalysis().CanaryReadyThreshold != nil {
        return *c.GetAnalysis().CanaryReadyThreshold
    }
    return CanaryReadyThreshold
}

// GetMetricInterval returns the metric interval default value (1m)
func (c *Canary) GetMetricInterval() string {
    return MetricInterval
@@ -74,6 +74,10 @@ type MetricTemplateProvider struct {
    // Region of the provider
    // +optional
    Region string `json:"region,omitempty"`

    // InsecureSkipVerify disables certificate verification for the provider
    // +optional
    InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
}
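
A hedged example of the new option in a metric template provider section; the address is a placeholder, and disabling certificate validation is generally discouraged outside test environments:

```yaml
spec:
  provider:
    type: prometheus
    address: https://prometheus.example.com
    # skip TLS certificate validation for the address above
    insecureSkipVerify: true
```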

// MetricTemplateModel is the query template model

@@ -11,4 +11,7 @@ const (
    KubernetesProvider string = "kubernetes"
    SkipperProvider    string = "skipper"
    TraefikProvider    string = "traefik"
    OsmProvider        string = "osm"
    KumaProvider       string = "kuma"
    GatewayAPIProvider string = "gatewayapi"
)
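
These constants correspond to the values accepted by the `-mesh-provider` flag seen in the kustomize patches above and, as a sketch, to a Canary's provider field (the versioned Gateway API value mirrors the flag; treat it as illustrative):

```yaml
# e.g. in a Canary resource
spec:
  provider: kuma   # or: osm, gatewayapi:v1alpha2
```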

@@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
@@ -21,6 +22,7 @@ limitations under the License.
package v1beta1

import (
    v1alpha2 "github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1alpha2"
    v1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
    v1 "k8s.io/api/core/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
@@ -202,6 +204,16 @@ func (in *CanaryAnalysis) DeepCopyInto(out *CanaryAnalysis) {
        *out = make([]int, len(*in))
        copy(*out, *in)
    }
    if in.PrimaryReadyThreshold != nil {
        in, out := &in.PrimaryReadyThreshold, &out.PrimaryReadyThreshold
        *out = new(int)
        **out = **in
    }
    if in.CanaryReadyThreshold != nil {
        in, out := &in.CanaryReadyThreshold, &out.CanaryReadyThreshold
        *out = new(int)
        **out = **in
    }
    if in.Alerts != nil {
        in, out := &in.Alerts, &out.Alerts
        *out = make([]CanaryAlert, len(*in))
@@ -327,6 +339,13 @@ func (in *CanaryService) DeepCopyInto(out *CanaryService) {
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.GatewayRefs != nil {
        in, out := &in.GatewayRefs, &out.GatewayRefs
        *out = make([]v1alpha2.ParentReference, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.Hosts != nil {
        in, out := &in.Hosts, &out.Hosts
        *out = make([]string, len(*in))

pkg/apis/gatewayapi/register.go (new file)
@@ -0,0 +1,5 @@
package gatewayapi

const (
    GroupName = "gateway.networking.k8s.io"
)

pkg/apis/gatewayapi/v1alpha2/doc.go (new file)
@@ -0,0 +1,6 @@
// +k8s:deepcopy-gen=package

// Package v1alpha2 contains API Schema definitions for the
// gateway.networking.k8s.io API group.

package v1alpha2
|
||||
879
pkg/apis/gatewayapi/v1alpha2/httproute_types.go
Normal file
879
pkg/apis/gatewayapi/v1alpha2/httproute_types.go
Normal file
@@ -0,0 +1,879 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:resource:categories=gateway-api
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:storageversion
|
||||
// +kubebuilder:printcolumn:name="Hostnames",type=string,JSONPath=`.spec.hostnames`
|
||||
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
|
||||
|
||||
// HTTPRoute provides a way to route HTTP requests. This includes the capability
|
||||
// to match requests by hostname, path, header, or query param. Filters can be
|
||||
// used to specify additional processing steps. Backends specify where matching
|
||||
// requests should be routed.
|
||||
type HTTPRoute struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// Spec defines the desired state of HTTPRoute.
|
||||
Spec HTTPRouteSpec `json:"spec"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// HTTPRouteList contains a list of HTTPRoute.
|
||||
type HTTPRouteList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []HTTPRoute `json:"items"`
|
||||
}
|
||||
|
||||
// HTTPRouteSpec defines the desired state of HTTPRoute
|
||||
type HTTPRouteSpec struct {
|
||||
CommonRouteSpec `json:",inline"`
|
||||
|
||||
// Hostnames defines a set of hostname that should match against the HTTP
|
||||
// Host header to select a HTTPRoute to process the request. This matches
|
||||
// the RFC 1123 definition of a hostname with 2 notable exceptions:
|
||||
//
|
||||
// 1. IPs are not allowed.
|
||||
// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard
|
||||
// label must appear by itself as the first label.
|
||||
//
|
||||
// If a hostname is specified by both the Listener and HTTPRoute, there
|
||||
// must be at least one intersecting hostname for the HTTPRoute to be
|
||||
// attached to the Listener. For example:
|
||||
//
|
||||
// * A Listener with `test.example.com` as the hostname matches HTTPRoutes
|
||||
// that have either not specified any hostnames, or have specified at
|
||||
// least one of `test.example.com` or `*.example.com`.
|
||||
// * A Listener with `*.example.com` as the hostname matches HTTPRoutes
|
||||
// that have either not specified any hostnames or have specified at least
|
||||
// one hostname that matches the Listener hostname. For example,
|
||||
// `test.example.com` and `*.example.com` would both match. On the other
|
||||
// hand, `example.com` and `test.example.net` would not match.
|
||||
//
|
||||
// If both the Listener and HTTPRoute have specified hostnames, any
|
||||
// HTTPRoute hostnames that do not match the Listener hostname MUST be
|
||||
// ignored. For example, if a Listener specified `*.example.com`, and the
|
||||
// HTTPRoute specified `test.example.com` and `test.example.net`,
|
||||
// `test.example.net` must not be considered for a match.
|
||||
//
|
||||
// If both the Listener and HTTPRoute have specified hostnames, and none
|
||||
// match with the criteria above, then the HTTPRoute is not accepted. The
|
||||
// implementation must raise an 'Accepted' Condition with a status of
|
||||
// `False` in the corresponding RouteParentStatus.
|
||||
//
|
||||
// Support: Core
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Hostnames []Hostname `json:"hostnames,omitempty"`
|
||||
|
||||
// Rules are a list of HTTP matchers, filters and actions.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
// +kubebuilder:default={{matches: {{path: {type: "PathPrefix", value: "/"}}}}}
|
||||
Rules []HTTPRouteRule `json:"rules,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPRouteRule defines semantics for matching an HTTP request based on
|
||||
// conditions (matches), processing it (filters), and forwarding the request to
|
||||
// an API object (backendRefs).
|
||||
type HTTPRouteRule struct {
|
||||
// Matches define conditions used for matching the rule against incoming
|
||||
// HTTP requests. Each match is independent, i.e. this rule will be matched
|
||||
// if **any** one of the matches is satisfied.
|
||||
//
|
||||
// For example, take the following matches configuration:
|
||||
//
|
||||
// ```
|
||||
// matches:
|
||||
// - path:
|
||||
// value: "/foo"
|
||||
// headers:
|
||||
// - name: "version"
|
||||
// value: "v2"
|
||||
// - path:
|
||||
// value: "/v2/foo"
|
||||
// ```
|
||||
//
|
||||
// For a request to match against this rule, a request must satisfy
|
||||
// EITHER of the two conditions:
|
||||
//
|
||||
// - path prefixed with `/foo` AND contains the header `version: v2`
|
||||
// - path prefix of `/v2/foo`
|
||||
//
|
||||
// See the documentation for HTTPRouteMatch on how to specify multiple
|
||||
// match conditions that should be ANDed together.
|
||||
//
|
||||
// If no matches are specified, the default is a prefix
|
||||
// path match on "/", which has the effect of matching every
|
||||
// HTTP request.
|
||||
//
|
||||
// Proxy or Load Balancer routing configuration generated from HTTPRoutes
|
||||
// MUST prioritize rules based on the following criteria, continuing on
|
||||
// ties. Precedence must be given to the the Rule with the largest number
|
||||
// of:
|
||||
//
|
||||
// * Characters in a matching non-wildcard hostname.
|
||||
// * Characters in a matching hostname.
|
||||
// * Characters in a matching path.
|
||||
// * Header matches.
|
||||
// * Query param matches.
|
||||
//
|
||||
// If ties still exist across multiple Routes, matching precedence MUST be
|
||||
// determined in order of the following criteria, continuing on ties:
|
||||
//
|
||||
// * The oldest Route based on creation timestamp.
|
||||
// * The Route appearing first in alphabetical order by
|
||||
// "{namespace}/{name}".
|
||||
//
|
||||
// If ties still exist within the Route that has been given precedence,
|
||||
// matching precedence MUST be granted to the first matching rule meeting
|
||||
// the above criteria.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=8
|
||||
// +kubebuilder:default={{path:{ type: "PathPrefix", value: "/"}}}
|
||||
Matches []HTTPRouteMatch `json:"matches,omitempty"`
|
||||
|
||||
// Filters define the filters that are applied to requests that match
|
||||
// this rule.
|
||||
//
|
||||
// The effects of ordering of multiple behaviors are currently unspecified.
|
||||
// This can change in the future based on feedback during the alpha stage.
|
||||
//
|
||||
// Conformance-levels at this level are defined based on the type of filter:
|
||||
//
|
||||
// - ALL core filters MUST be supported by all implementations.
|
||||
// - Implementers are encouraged to support extended filters.
|
||||
// - Implementation-specific custom filters have no API guarantees across
|
||||
// implementations.
|
||||
//
|
||||
// Specifying a core filter multiple times has unspecified or custom
|
||||
// conformance.
|
||||
//
|
||||
// Support: Core
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Filters []HTTPRouteFilter `json:"filters,omitempty"`
|
||||
|
||||
// BackendRefs defines the backend(s) where matching requests should be
|
||||
// sent.
|
||||
|
||||
// If unspecified or invalid (refers to a non-existent resource or a Service
|
||||
// with no endpoints), the rule performs no forwarding. If there are also no
|
||||
// filters specified that would result in a response being sent, a HTTP 503
|
||||
// status code is returned. 503 responses must be sent so that the overall
|
||||
// weight is respected; if an invalid backend is requested to have 80% of
|
||||
// requests, then 80% of requests must get a 503 instead.
|
||||
//
|
||||
// Support: Core for Kubernetes Service
|
||||
// Support: Custom for any other resource
|
||||
//
|
||||
// Support for weight: Core
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"`
|
||||
}
|
||||
|
||||
// PathMatchType specifies the semantics of how HTTP paths should be compared.
|
||||
// Valid PathMatchType values are:
|
||||
//
|
||||
// * "Exact"
|
||||
// * "PathPrefix"
|
||||
// * "RegularExpression"
|
||||
//
|
||||
// PathPrefix and Exact paths must be syntactically valid:
|
||||
//
|
||||
// - Must begin with the `/` character
|
||||
// - Must not contain consecutive `/` characters (e.g. `/foo///`, `//`).
|
||||
//
|
||||
// +kubebuilder:validation:Enum=Exact;PathPrefix;RegularExpression
|
||||
type PathMatchType string
|
||||
|
||||
const (
|
||||
// Matches the URL path exactly and with case sensitivity.
|
||||
PathMatchExact PathMatchType = "Exact"
|
||||
|
||||
// Matches based on a URL path prefix split by `/`. Matching is
|
||||
// case sensitive and done on a path element by element basis. A
|
||||
// path element refers to the list of labels in the path split by
|
||||
// the `/` separator. When specified, a trailing `/` is ignored.
|
||||
//
|
||||
// For example. the paths `/abc`, `/abc/`, and `/abc/def` would all match
|
||||
// the prefix `/abc`, but the path `/abcd` would not.
|
||||
//
|
||||
// "PathPrefix" is semantically equivalent to the "Prefix" path type in the
|
||||
// Kubernetes Ingress API.
|
||||
PathMatchPathPrefix PathMatchType = "PathPrefix"
|
||||
|
||||
// Matches if the URL path matches the given regular expression with
|
||||
// case sensitivity.
|
||||
//
|
||||
// Since `"RegularExpression"` has custom conformance, implementations
|
||||
// can support POSIX, PCRE, RE2 or any other regular expression dialect.
|
||||
// Please read the implementation's documentation to determine the supported
|
||||
// dialect.
|
||||
PathMatchRegularExpression PathMatchType = "RegularExpression"
|
||||
)
|
||||
|
||||
// HTTPPathMatch describes how to select a HTTP route by matching the HTTP request path.
|
||||
type HTTPPathMatch struct {
|
||||
// Type specifies how to match against the path Value.
|
||||
//
|
||||
// Support: Core (Exact, PathPrefix)
|
||||
//
|
||||
// Support: Custom (RegularExpression)
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:default=PathPrefix
|
||||
Type *PathMatchType `json:"type,omitempty"`
|
||||
|
||||
// Value of the HTTP path to match against.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:default="/"
|
||||
// +kubebuilder:validation:MaxLength=1024
|
||||
Value *string `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
// HeaderMatchType specifies the semantics of how HTTP header values should be
|
||||
// compared. Valid HeaderMatchType values are:
|
||||
//
|
||||
// * "Exact"
|
||||
// * "RegularExpression"
|
||||
//
|
||||
// +kubebuilder:validation:Enum=Exact;RegularExpression
|
||||
type HeaderMatchType string
|
||||
|
||||
// HeaderMatchType constants.
|
||||
const (
|
||||
HeaderMatchExact HeaderMatchType = "Exact"
|
||||
HeaderMatchRegularExpression HeaderMatchType = "RegularExpression"
|
||||
)
|
||||
|
||||
// HTTPHeaderName is the name of an HTTP header.
|
||||
//
|
||||
// Valid values include:
|
||||
//
|
||||
// * "Authorization"
|
||||
// * "Set-Cookie"
|
||||
//
|
||||
// Invalid values include:
|
||||
//
|
||||
// * ":method" - ":" is an invalid character. This means that HTTP/2 pseudo
|
||||
// headers are not currently supported by this type.
|
||||
// * "/invalid" - "/" is an invalid character
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=256
|
||||
// +kubebuilder:validation:Pattern=`^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$`
|
||||
type HTTPHeaderName string
|
||||
|
||||
// HTTPHeaderMatch describes how to select a HTTP route by matching HTTP request
|
||||
// headers.
|
||||
type HTTPHeaderMatch struct {
|
||||
// Type specifies how to match against the value of the header.
|
||||
//
|
||||
// Support: Core (Exact)
|
||||
//
|
||||
// Support: Custom (RegularExpression)
|
||||
//
|
||||
// Since RegularExpression HeaderMatchType has custom conformance,
|
||||
// implementations can support POSIX, PCRE or any other dialects of regular
|
||||
// expressions. Please read the implementation's documentation to determine
|
||||
// the supported dialect.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:default=Exact
|
||||
Type *HeaderMatchType `json:"type,omitempty"`
|
||||
|
||||
// Name is the name of the HTTP Header to be matched. Name matching MUST be
|
||||
// case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2).
|
||||
//
|
||||
// If multiple entries specify equivalent header names, only the first
|
||||
// entry with an equivalent name MUST be considered for a match. Subsequent
|
||||
// entries with an equivalent header name MUST be ignored. Due to the
|
||||
// case-insensitivity of header names, "foo" and "Foo" are considered
|
||||
// equivalent.
|
||||
//
|
||||
// When a header is repeated in an HTTP request, it is
|
||||
// implementation-specific behavior as to how this is represented.
|
||||
// Generally, proxies should follow the guidance from the RFC:
|
||||
// https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 regarding
|
||||
// processing a repeated header, with special handling for "Set-Cookie".
|
||||
Name HTTPHeaderName `json:"name"`
|
||||
|
||||
// Value is the value of HTTP Header to be matched.
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=4096
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// QueryParamMatchType specifies the semantics of how HTTP query parameter
|
||||
// values should be compared. Valid QueryParamMatchType values are:
|
||||
//
|
||||
// * "Exact"
|
||||
// * "RegularExpression"
|
||||
//
|
||||
// +kubebuilder:validation:Enum=Exact;RegularExpression
|
||||
type QueryParamMatchType string
|
||||
|
||||
// QueryParamMatchType constants.
|
||||
const (
|
||||
QueryParamMatchExact QueryParamMatchType = "Exact"
|
||||
QueryParamMatchRegularExpression QueryParamMatchType = "RegularExpression"
|
||||
)
|
||||
|
||||
// HTTPQueryParamMatch describes how to select a HTTP route by matching HTTP
|
||||
// query parameters.
|
||||
type HTTPQueryParamMatch struct {
|
||||
// Type specifies how to match against the value of the query parameter.
|
||||
//
|
||||
// Support: Extended (Exact)
|
||||
//
|
||||
// Support: Custom (RegularExpression)
|
||||
//
|
||||
// Since RegularExpression QueryParamMatchType has custom conformance,
|
||||
// implementations can support POSIX, PCRE or any other dialects of regular
|
||||
// expressions. Please read the implementation's documentation to determine
|
||||
// the supported dialect.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:default=Exact
|
||||
Type *QueryParamMatchType `json:"type,omitempty"`
|
||||
|
||||
// Name is the name of the HTTP query param to be matched. This must be an
|
||||
// exact string match. (See
|
||||
// https://tools.ietf.org/html/rfc7230#section-2.7.3).
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=256
|
||||
Name string `json:"name"`
|
||||
|
||||
// Value is the value of HTTP query param to be matched.
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=1024
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// HTTPMethod describes how to select a HTTP route by matching the HTTP
|
||||
// method as defined by
|
||||
// [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-4) and
|
||||
// [RFC 5789](https://datatracker.ietf.org/doc/html/rfc5789#section-2).
|
||||
// The value is expected in upper case.
|
||||
// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH
|
||||
type HTTPMethod string
|
||||
|
||||
const (
|
||||
HTTPMethodGet HTTPMethod = "GET"
|
||||
HTTPMethodHead HTTPMethod = "HEAD"
|
||||
HTTPMethodPost HTTPMethod = "POST"
|
||||
HTTPMethodPut HTTPMethod = "PUT"
|
||||
HTTPMethodDelete HTTPMethod = "DELETE"
|
||||
HTTPMethodConnect HTTPMethod = "CONNECT"
|
||||
HTTPMethodOptions HTTPMethod = "OPTIONS"
|
||||
HTTPMethodTrace HTTPMethod = "TRACE"
|
||||
HTTPMethodPatch HTTPMethod = "PATCH"
|
||||
)
|
||||
|
||||
// HTTPRouteMatch defines the predicate used to match requests to a given
|
||||
// action. Multiple match types are ANDed together, i.e. the match will
|
||||
// evaluate to true only if all conditions are satisfied.
|
||||
//
|
||||
// For example, the match below will match a HTTP request only if its path
|
||||
// starts with `/foo` AND it contains the `version: v1` header:
|
||||
//
|
||||
// ```
|
||||
// match:
|
||||
// path:
|
||||
// value: "/foo"
|
||||
// headers:
|
||||
// - name: "version"
|
||||
// value "v1"
|
||||
// ```
|
||||
type HTTPRouteMatch struct {
|
||||
// Path specifies a HTTP request path matcher. If this field is not
|
||||
// specified, a default prefix match on the "/" path is provided.
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:default={type: "PathPrefix", value: "/"}
|
||||
Path *HTTPPathMatch `json:"path,omitempty"`
|
||||
|
||||
// Headers specifies HTTP request header matchers. Multiple match values are
|
||||
// ANDed together, meaning, a request must match all the specified headers
|
||||
// to select the route.
|
||||
//
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Headers []HTTPHeaderMatch `json:"headers,omitempty"`
|
||||
|
||||
// QueryParams specifies HTTP query parameter matchers. Multiple match
|
||||
// values are ANDed together, meaning, a request must match all the
|
||||
// specified query parameters to select the route.
|
||||
//
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
QueryParams []HTTPQueryParamMatch `json:"queryParams,omitempty"`
|
||||
|
||||
// Method specifies HTTP method matcher.
|
||||
// When specified, this route will be matched only if the request has the
|
||||
// specified method.
|
||||
//
|
||||
// Support: Extended
|
||||
//
|
||||
// +optional
|
||||
Method *HTTPMethod `json:"method,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPRouteFilter defines processing steps that must be completed during the
|
||||
// request or response lifecycle. HTTPRouteFilters are meant as an extension
|
||||
// point to express processing that may be done in Gateway implementations. Some
|
||||
// examples include request or response modification, implementing
|
||||
// authentication strategies, rate-limiting, and traffic shaping. API
|
||||
// guarantee/conformance is defined based on the type of the filter.
|
||||
type HTTPRouteFilter struct {
|
||||
// Type identifies the type of filter to apply. As with other API fields,
|
||||
// types are classified into three conformance levels:
|
||||
//
|
||||
// - Core: Filter types and their corresponding configuration defined by
|
||||
// "Support: Core" in this package, e.g. "RequestHeaderModifier". All
|
||||
// implementations must support core filters.
|
||||
//
|
||||
// - Extended: Filter types and their corresponding configuration defined by
|
||||
// "Support: Extended" in this package, e.g. "RequestMirror". Implementers
|
||||
// are encouraged to support extended filters.
|
||||
//
|
||||
// - Custom: Filters that are defined and supported by specific vendors.
|
||||
// In the future, filters showing convergence in behavior across multiple
|
||||
// implementations will be considered for inclusion in extended or core
|
||||
// conformance levels. Filter-specific configuration for such filters
|
||||
// is specified using the ExtensionRef field. `Type` should be set to
|
||||
// "ExtensionRef" for custom filters.
|
||||
//
|
||||
// Implementers are encouraged to define custom implementation types to
|
||||
// extend the core API with implementation-specific behavior.
|
||||
//
|
||||
// If a reference to a custom filter type cannot be resolved, the filter
|
||||
// MUST NOT be skipped. Instead, requests that would have been processed by
|
||||
// that filter MUST receive a HTTP error response.
|
||||
//
|
||||
// +unionDiscriminator
|
||||
// +kubebuilder:validation:Enum=RequestHeaderModifier;RequestMirror;RequestRedirect;ExtensionRef
|
||||
// <gateway:experimental:validation:Enum=RequestHeaderModifier;RequestMirror;RequestRedirect;URLRewrite;ExtensionRef>
|
||||
Type HTTPRouteFilterType `json:"type"`
|
||||
|
||||
// RequestHeaderModifier defines a schema for a filter that modifies request
|
||||
// headers.
|
||||
//
|
||||
// Support: Core
|
||||
//
|
||||
// +optional
|
||||
RequestHeaderModifier *HTTPRequestHeaderFilter `json:"requestHeaderModifier,omitempty"`
|
||||
|
||||
// RequestMirror defines a schema for a filter that mirrors requests.
|
||||
// Requests are sent to the specified destination, but responses from
|
||||
// that destination are ignored.
|
||||
//
|
||||
// Support: Extended
|
||||
//
|
||||
// +optional
|
||||
RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"`
|
||||
|
||||
// RequestRedirect defines a schema for a filter that responds to the
|
||||
// request with an HTTP redirection.
|
||||
//
|
||||
// Support: Core
|
||||
//
|
||||
// +optional
|
||||
RequestRedirect *HTTPRequestRedirectFilter `json:"requestRedirect,omitempty"`
|
||||
|
||||
// URLRewrite defines a schema for a filter that responds to the
|
||||
// request with an HTTP redirection.
|
||||
//
|
||||
// Support: Extended
|
||||
//
|
||||
// <gateway:experimental>
|
||||
// +optional
|
||||
URLRewrite *HTTPURLRewriteFilter `json:"urlRewrite,omitempty"`
|
||||
|
||||
// ExtensionRef is an optional, implementation-specific extension to the
|
||||
// "filter" behavior. For example, resource "myroutefilter" in group
|
||||
// "networking.example.net"). ExtensionRef MUST NOT be used for core and
|
||||
// extended filters.
|
||||
//
|
||||
// Support: Implementation-specific
|
||||
//
|
||||
// +optional
|
||||
ExtensionRef *LocalObjectReference `json:"extensionRef,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPRouteFilterType identifies a type of HTTPRoute filter.
|
||||
type HTTPRouteFilterType string
|
||||
|
||||
const (
|
||||
// HTTPRouteFilterRequestHeaderModifier can be used to add or remove an HTTP
|
||||
// header from an HTTP request before it is sent to the upstream target.
|
||||
//
|
||||
// Support in HTTPRouteRule: Core
|
||||
//
|
||||
// Support in HTTPBackendRef: Extended
|
||||
HTTPRouteFilterRequestHeaderModifier HTTPRouteFilterType = "RequestHeaderModifier"
|
||||
|
||||
// HTTPRouteFilterRequestRedirect can be used to redirect a request to
|
||||
// another location. This filter can also be used for HTTP to HTTPS
|
||||
// redirects. This may not be used on the same Route rule or BackendRef as a
|
||||
// URLRewrite filter.
|
||||
//
|
||||
// Support in HTTPRouteRule: Core
|
||||
//
|
||||
// Support in HTTPBackendRef: Extended
|
||||
HTTPRouteFilterRequestRedirect HTTPRouteFilterType = "RequestRedirect"
|
||||
|
||||
// HTTPRouteFilterURLRewrite can be used to modify a request during
|
||||
// forwarding. At most one of these filters may be used on a Route rule.
|
||||
// This may not be used on the same Route rule or BackendRef as a
|
||||
// RequestRedirect filter.
|
||||
//
|
||||
// Support in HTTPRouteRule: Extended
|
||||
//
|
||||
// Support in HTTPBackendRef: Extended
|
||||
//
|
||||
// <gateway:experimental>
|
||||
HTTPRouteFilterURLRewrite HTTPRouteFilterType = "URLRewrite"
|
||||
|
||||
// HTTPRouteFilterRequestMirror can be used to mirror HTTP requests to a
|
||||
// different backend. The responses from this backend MUST be ignored by
|
||||
// the Gateway.
|
||||
//
|
||||
// Support in HTTPRouteRule: Extended
|
||||
//
|
||||
// Support in HTTPBackendRef: Extended
|
||||
HTTPRouteFilterRequestMirror HTTPRouteFilterType = "RequestMirror"
|
||||
|
||||
// HTTPRouteFilterExtensionRef should be used for configuring custom
|
||||
// HTTP filters.
|
||||
//
|
||||
// Support in HTTPRouteRule: Custom
|
||||
//
|
||||
// Support in HTTPBackendRef: Custom
|
||||
HTTPRouteFilterExtensionRef HTTPRouteFilterType = "ExtensionRef"
|
||||
)
|
||||
|
||||
// HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
|
||||
type HTTPHeader struct {
|
||||
// Name is the name of the HTTP Header to be matched. Name matching MUST be
|
||||
// case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2).
|
||||
//
|
||||
// If multiple entries specify equivalent header names, the first entry with
|
||||
// an equivalent name MUST be considered for a match. Subsequent entries
|
||||
// with an equivalent header name MUST be ignored. Due to the
|
||||
// case-insensitivity of header names, "foo" and "Foo" are considered
|
||||
// equivalent.
|
||||
Name HTTPHeaderName `json:"name"`
|
||||
|
||||
// Value is the value of HTTP Header to be matched.
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=4096
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// HTTPRequestHeaderFilter defines configuration for the RequestHeaderModifier
|
||||
// filter.
|
||||
type HTTPRequestHeaderFilter struct {
|
||||
// Set overwrites the request with the given header (name, value)
|
||||
// before the action.
|
||||
//
|
||||
// Input:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header: foo
|
||||
//
|
||||
// Config:
|
||||
// set:
|
||||
// - name: "my-header"
|
||||
// value: "bar"
|
||||
//
|
||||
// Output:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header: bar
|
||||
//
|
||||
// +optional
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Set []HTTPHeader `json:"set,omitempty"`
|
||||
|
||||
// Add adds the given header(s) (name, value) to the request
|
||||
// before the action. It appends to any existing values associated
|
||||
// with the header name.
|
||||
//
|
||||
// Input:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header: foo
|
||||
//
|
||||
// Config:
|
||||
// add:
|
||||
// - name: "my-header"
|
||||
// value: "bar"
|
||||
//
|
||||
// Output:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header: foo
|
||||
// my-header: bar
|
||||
//
|
||||
// +optional
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Add []HTTPHeader `json:"add,omitempty"`
|
||||
|
||||
// Remove the given header(s) from the HTTP request before the action. The
|
||||
// value of Remove is a list of HTTP header names. Note that the header
|
||||
// names are case-insensitive (see
|
||||
// https://datatracker.ietf.org/doc/html/rfc2616#section-4.2).
|
||||
//
|
||||
// Input:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header1: foo
|
||||
// my-header2: bar
|
||||
// my-header3: baz
|
||||
//
|
||||
// Config:
|
||||
// remove: ["my-header1", "my-header3"]
|
||||
//
|
||||
// Output:
|
||||
// GET /foo HTTP/1.1
|
||||
// my-header2: bar
|
||||
//
|
||||
// +optional
|
||||
// +kubebuilder:validation:MaxItems=16
|
||||
Remove []string `json:"remove,omitempty"`
|
||||
}
|
||||
|
||||
// HTTPPathModifierType defines the type of path redirect.
|
||||
type HTTPPathModifierType string
|
||||
|
||||
const (
|
||||
// This type of modifier indicates that the complete path will be replaced
|
||||
// by the path redirect value.
|
||||
AbsoluteHTTPPathModifier HTTPPathModifierType = "Absolute"
|
||||
|
||||
// This type of modifier indicates that any prefix path matches will be
|
||||
// replaced by the substitution value. For example, a path with a prefix
|
||||
// match of "/foo" and a ReplacePrefixMatch substitution of "/bar" will have
|
||||
// the "/foo" prefix replaced with "/bar" in matching requests.
|
||||
PrefixMatchHTTPPathModifier HTTPPathModifierType = "ReplacePrefixMatch"
|
||||
)
|
||||
|
||||
// HTTPPathModifier defines configuration for path modifiers.
|
||||
// <gateway:experimental>
|
||||
type HTTPPathModifier struct {
|
||||
// Type defines the type of path modifier.
|
||||
//
|
||||
// <gateway:experimental>
|
||||
// +kubebuilder:validation:Enum=Absolute;ReplacePrefixMatch
|
||||
Type HTTPPathModifierType `json:"type"`
|
||||
|
||||
// Substitution defines the HTTP path value to substitute. An empty value
|
||||
// ("") indicates that the portion of the path to be changed should be
|
||||
// removed from the resulting path. For example, a request to "/foo/bar"
|
||||
// with a prefix match of "/foo" would be modified to "/bar".
|
||||
//
|
||||
// <gateway:experimental>
|
||||
// +kubebuilder:validation:MaxLength=1024
|
||||
Substitution string `json:"substitution"`
|
||||
}
|

// HTTPRequestRedirectFilter defines a filter that redirects a request. This
// filter MUST NOT be used on the same Route rule as an HTTPURLRewrite filter.
type HTTPRequestRedirectFilter struct {
	// Scheme is the scheme to be used in the value of the `Location`
	// header in the response.
	// When empty, the scheme of the request is used.
	//
	// Support: Extended
	//
	// +optional
	// +kubebuilder:validation:Enum=http;https
	Scheme *string `json:"scheme,omitempty"`

	// Hostname is the hostname to be used in the value of the `Location`
	// header in the response.
	// When empty, the hostname of the request is used.
	//
	// Support: Core
	//
	// +optional
	Hostname *PreciseHostname `json:"hostname,omitempty"`

	// Path defines parameters used to modify the path of the incoming request.
	// The modified path is then used to construct the `Location` header. When
	// empty, the request path is used as-is.
	//
	// Support: Extended
	//
	// <gateway:experimental>
	// +optional
	Path *HTTPPathModifier `json:"path,omitempty"`

	// Port is the port to be used in the value of the `Location`
	// header in the response.
	// When empty, the port (if specified) of the request is used.
	//
	// Support: Extended
	//
	// +optional
	Port *PortNumber `json:"port,omitempty"`

	// StatusCode is the HTTP status code to be used in response.
	//
	// Support: Core
	//
	// +optional
	// +kubebuilder:default=302
	// +kubebuilder:validation:Enum=301;302
	StatusCode *int `json:"statusCode,omitempty"`
}
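
// Illustrative sketch, not part of the upstream source: a permanent (301)
// redirect to HTTPS on port 443. Go has no pointer literals for basic types,
// so hypothetical helper variables hold the values.
var (
	redirectScheme = "https"
	redirectPort   = PortNumber(443)
	redirectCode   = 301

	exampleRedirect = HTTPRequestRedirectFilter{
		Scheme:     &redirectScheme,
		Port:       &redirectPort,
		StatusCode: &redirectCode,
	}
)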

// HTTPURLRewriteFilter defines a filter that modifies a request during
// forwarding. At most one of these filters may be used on a Route rule. This
// may not be used on the same Route rule as an HTTPRequestRedirect filter.
//
// <gateway:experimental>
// Support: Extended
type HTTPURLRewriteFilter struct {
	// Hostname is the value to be used to replace the Host header value during
	// forwarding.
	//
	// Support: Extended
	//
	// <gateway:experimental>
	// +optional
	Hostname *Hostname `json:"hostname,omitempty"`

	// Path defines a path rewrite.
	//
	// Support: Extended
	//
	// <gateway:experimental>
	// +optional
	Path *HTTPPathModifier `json:"path,omitempty"`
}
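
// Illustrative sketch, not part of the upstream source: rewrite the Host
// header to an internal name and replace a matched "/v1" prefix with "/v2"
// while forwarding. The variable names are hypothetical.
var (
	rewriteHost    = Hostname("backend.internal.example.com")
	exampleRewrite = HTTPURLRewriteFilter{
		Hostname: &rewriteHost,
		Path: &HTTPPathModifier{
			Type:         PrefixMatchHTTPPathModifier,
			Substitution: "/v2",
		},
	}
)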

// HTTPRequestMirrorFilter defines configuration for the RequestMirror filter.
type HTTPRequestMirrorFilter struct {
	// BackendRef references a resource where mirrored requests are sent.
	//
	// If the referent cannot be found, this BackendRef is invalid and must be
	// dropped from the Gateway. The controller must ensure the "ResolvedRefs"
	// condition on the Route status is set to `status: False` and not configure
	// this backend in the underlying implementation.
	//
	// If there is a cross-namespace reference to an *existing* object
	// that is not allowed by a ReferencePolicy, the controller must ensure the
	// "ResolvedRefs" condition on the Route is set to `status: False`,
	// with the "RefNotPermitted" reason and not configure this backend in the
	// underlying implementation.
	//
	// In either error case, the Message of the `ResolvedRefs` Condition
	// should be used to provide more detail about the problem.
	//
	// Support: Extended for Kubernetes Service
	// Support: Custom for any other resource
	BackendRef BackendObjectReference `json:"backendRef"`
}
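
// Illustrative sketch, not part of the upstream source: mirror matched
// requests to a Service named "podinfo-canary" on port 9898 in the local
// namespace. The names and port are hypothetical.
var (
	mirrorPort    = PortNumber(9898)
	exampleMirror = HTTPRequestMirrorFilter{
		BackendRef: BackendObjectReference{
			Name: "podinfo-canary",
			Port: &mirrorPort,
		},
	}
)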

// HTTPBackendRef defines how an HTTPRoute should forward an HTTP request.
type HTTPBackendRef struct {
	// BackendRef is a reference to a backend to forward matched requests to.
	//
	// If the referent cannot be found, this HTTPBackendRef is invalid and must
	// be dropped from the Gateway. The controller must ensure the
	// "ResolvedRefs" condition on the Route is set to `status: False` and not
	// configure this backend in the underlying implementation.
	//
	// If there is a cross-namespace reference to an *existing* object
	// that is not covered by a ReferencePolicy, the controller must ensure the
	// "ResolvedRefs" condition on the Route is set to `status: False`,
	// with the "RefNotPermitted" reason and not configure this backend in the
	// underlying implementation.
	//
	// In either error case, the Message of the `ResolvedRefs` Condition
	// should be used to provide more detail about the problem.
	//
	// Support: Custom
	//
	// +optional
	BackendRef `json:",inline"`

	// Filters defined at this level should be executed if and only if the
	// request is being forwarded to the backend defined here.
	//
	// Support: Custom (For broader support of filters, use the Filters field
	// in HTTPRouteRule.)
	//
	// +optional
	// +kubebuilder:validation:MaxItems=16
	Filters []HTTPRouteFilter `json:"filters,omitempty"`
}
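
// Illustrative sketch, not part of the upstream source. It assumes, as in the
// upstream Gateway API, that the embedded BackendRef wraps a
// BackendObjectReference; the per-backend filter slice is left empty because
// HTTPRouteFilter's fields are defined elsewhere. All names are hypothetical.
var (
	backendPort    = PortNumber(8080)
	exampleBackend = HTTPBackendRef{
		BackendRef: BackendRef{
			BackendObjectReference: BackendObjectReference{
				Name: "podinfo",
				Port: &backendPort,
			},
		},
		Filters: []HTTPRouteFilter{},
	}
)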

// HTTPRouteStatus defines the observed state of HTTPRoute.
type HTTPRouteStatus struct {
	RouteStatus `json:",inline"`
}

// Hostname is the fully qualified domain name of a network host. This matches
// the RFC 1123 definition of a hostname with 2 notable exceptions:
//
// 1. IPs are not allowed.
// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard
//    label must appear by itself as the first label.
//
// Hostname can be "precise" which is a domain name without the terminating
// dot of a network host (e.g. "foo.example.com") or "wildcard", which is a
// domain name prefixed with a single wildcard label (e.g. `*.example.com`).
//
// Note that as per RFC1035 and RFC1123, a *label* must consist of lower case
// alphanumeric characters or '-', and must start and end with an alphanumeric
// character. No other punctuation is allowed.
//
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=`^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
type Hostname string
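
// Illustrative sketch, not part of the upstream source: the kubebuilder
// Pattern marker above can be checked directly with the standard library
// (requires an additional `import "regexp"`). The names are hypothetical.
var hostnameRE = regexp.MustCompile(`^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

// matchesHostnamePattern reports whether h satisfies the pattern; the full
// CRD validation additionally enforces MinLength=1 and MaxLength=253, so
// those bounds are checked here too.
func matchesHostnamePattern(h Hostname) bool {
	return len(h) >= 1 && len(h) <= 253 && hostnameRE.MatchString(string(h))
}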

// PortNumber defines a network port.
//
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=65535
type PortNumber int32
pkg/apis/gatewayapi/v1alpha2/object_reference_types.go (new file, 131 lines)
@@ -0,0 +1,131 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

// LocalObjectReference identifies an API object within the namespace of the
// referrer.
// The API object must be valid in the cluster; the Group and Kind must
// be registered in the cluster for this reference to be valid.
//
// References to objects with invalid Group and Kind are not valid, and must
// be rejected by the implementation, with appropriate Conditions set
// on the containing object.
type LocalObjectReference struct {
	// Group is the group of the referent. For example, "networking.k8s.io".
	// When unspecified (empty string), core API group is inferred.
	Group Group `json:"group"`

	// Kind is the kind of the referent. For example, "HTTPRoute" or "Service".
	Kind Kind `json:"kind"`

	// Name is the name of the referent.
	Name ObjectName `json:"name"`
}
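
// Illustrative sketch, not part of the upstream source: a same-namespace
// reference to an HTTPRoute named "podinfo". The variable name is
// hypothetical.
var exampleLocalRef = LocalObjectReference{
	Group: "gateway.networking.k8s.io",
	Kind:  "HTTPRoute",
	Name:  "podinfo",
}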

// SecretObjectReference identifies an API object including its namespace,
// defaulting to Secret.
//
// The API object must be valid in the cluster; the Group and Kind must
// be registered in the cluster for this reference to be valid.
//
// References to objects with invalid Group and Kind are not valid, and must
// be rejected by the implementation, with appropriate Conditions set
// on the containing object.
type SecretObjectReference struct {
	// Group is the group of the referent. For example, "networking.k8s.io".
	// When unspecified (empty string), core API group is inferred.
	//
	// +optional
	// +kubebuilder:default=""
	Group *Group `json:"group"`

	// Kind is the kind of the referent. For example, "HTTPRoute" or "Service".
	//
	// +optional
	// +kubebuilder:default=Secret
	Kind *Kind `json:"kind"`

	// Name is the name of the referent.
	Name ObjectName `json:"name"`

	// Namespace is the namespace of the referent. When unspecified, the local
	// namespace is inferred.
	//
	// Note that when a namespace is specified, a ReferencePolicy object
	// is required in the referent namespace to allow that namespace's
	// owner to accept the reference. See the ReferencePolicy documentation
	// for details.
	//
	// Support: Core
	//
	// +optional
	Namespace *Namespace `json:"namespace,omitempty"`
}
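
// Illustrative sketch, not part of the upstream source: a cross-namespace
// reference to a TLS certificate Secret, which additionally requires a
// ReferencePolicy in "certs" to be resolvable. Group and Kind are left nil so
// the kubebuilder defaults ("" and Secret) apply; all names are hypothetical.
var (
	certsNamespace   = Namespace("certs")
	exampleSecretRef = SecretObjectReference{
		Name:      "podinfo-tls",
		Namespace: &certsNamespace,
	}
)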

// BackendObjectReference defines an ObjectReference that is specific to
// BackendRef. It includes a few additional fields and features compared to a
// regular ObjectReference.
//
// Note that when a namespace is specified, a ReferencePolicy object
// is required in the referent namespace to allow that namespace's
// owner to accept the reference. See the ReferencePolicy documentation
// for details.
//
// The API object must be valid in the cluster; the Group and Kind must
// be registered in the cluster for this reference to be valid.
//
// References to objects with invalid Group and Kind are not valid, and must
// be rejected by the implementation, with appropriate Conditions set
// on the containing object.
type BackendObjectReference struct {
	// Group is the group of the referent. For example, "networking.k8s.io".
	// When unspecified (empty string), core API group is inferred.
	//
	// +optional
	// +kubebuilder:default=""
	Group *Group `json:"group,omitempty"`

	// Kind is the kind of the referent. For example, "HTTPRoute" or "Service".
	//
	// +optional
	// +kubebuilder:default=Service
	Kind *Kind `json:"kind,omitempty"`

	// Name is the name of the referent.
	Name ObjectName `json:"name"`

	// Namespace is the namespace of the backend. When unspecified, the local
	// namespace is inferred.
	//
	// Note that when a namespace is specified, a ReferencePolicy object
	// is required in the referent namespace to allow that namespace's
	// owner to accept the reference. See the ReferencePolicy documentation
	// for details.
	//
	// Support: Core
	//
	// +optional
	Namespace *Namespace `json:"namespace,omitempty"`

	// Port specifies the destination port number to use for this resource.
	// Port is required when the referent is a Kubernetes Service.
	// For other resources, the destination port might be derived from the
	// referent resource or this field.
	//
	// +optional
	Port *PortNumber `json:"port,omitempty"`
}
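
// Illustrative sketch, not part of the upstream source: a reference to the
// "podinfo" Service in the "test" namespace on port 9898. Group and Kind are
// left nil so the kubebuilder defaults ("" and Service) apply; the names are
// hypothetical.
var (
	backendNamespace = Namespace("test")
	servicePort      = PortNumber(9898)

	exampleServiceRef = BackendObjectReference{
		Name:      "podinfo",
		Namespace: &backendNamespace,
		Port:      &servicePort,
	}
)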
Some files were not shown because too many files have changed in this diff.