mirror of
https://github.com/fluxcd/flagger.git
synced 2026-04-15 06:57:34 +00:00
Compare commits
294 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e5d83ad21 | ||
|
|
68f0920548 | ||
|
|
0d25d84230 | ||
|
|
15a6f742e0 | ||
|
|
495a5b24f4 | ||
|
|
956daea9dd | ||
|
|
7b17286b96 | ||
|
|
e535b01de1 | ||
|
|
d151a1b5e4 | ||
|
|
7242fa7d5c | ||
|
|
9d4ebd9ddd | ||
|
|
b2e713dbc1 | ||
|
|
8bcc7bf9af | ||
|
|
3078f96830 | ||
|
|
8708e35287 | ||
|
|
a8b96f053d | ||
|
|
a487357bd5 | ||
|
|
e8aba087ac | ||
|
|
5b7a679944 | ||
|
|
8229852585 | ||
|
|
f1def19f25 | ||
|
|
44363d5d99 | ||
|
|
f3f62667bf | ||
|
|
3d8615735b | ||
|
|
d1b6b36bcd | ||
|
|
e4755a4567 | ||
|
|
2ced721cf1 | ||
|
|
cf267d0bbd | ||
|
|
49d59f3b45 | ||
|
|
699ea2b8aa | ||
|
|
064d867510 | ||
|
|
5f63c4ae63 | ||
|
|
4ebb38743d | ||
|
|
01a7f3606c | ||
|
|
699c577fa6 | ||
|
|
6879038a63 | ||
|
|
cc2f9456cf | ||
|
|
7994989b29 | ||
|
|
1206132e0c | ||
|
|
74cfbda40c | ||
|
|
1266ff48d8 | ||
|
|
b1315679b8 | ||
|
|
859fb7e160 | ||
|
|
32077636ff | ||
|
|
e263d6a169 | ||
|
|
8d517799b5 | ||
|
|
a89cd6d3ba | ||
|
|
4c2de0c716 | ||
|
|
059a304a07 | ||
|
|
317d53a71f | ||
|
|
202b6e7eb1 | ||
|
|
e59e3aedd4 | ||
|
|
2b45c2013c | ||
|
|
6786668684 | ||
|
|
27eb21ecc8 | ||
|
|
e7d8adecb4 | ||
|
|
aa574d469e | ||
|
|
5ba20c254a | ||
|
|
bb2cf39393 | ||
|
|
05d08d3ff1 | ||
|
|
7ec3774172 | ||
|
|
e9ffef29f6 | ||
|
|
64035b4942 | ||
|
|
925cc37c8f | ||
|
|
1574e29376 | ||
|
|
4a34158587 | ||
|
|
534196adde | ||
|
|
57bf2ab7d1 | ||
|
|
c65d072249 | ||
|
|
a50d7de86d | ||
|
|
e365c21322 | ||
|
|
7ce679678f | ||
|
|
685c816a12 | ||
|
|
5d3ab056f0 | ||
|
|
2587a3d3f1 | ||
|
|
58270dd4b9 | ||
|
|
86081708a4 | ||
|
|
686e8a3e8b | ||
|
|
0aecddb00e | ||
|
|
26518cecbf | ||
|
|
9d1db87592 | ||
|
|
e352010bfd | ||
|
|
58267752b1 | ||
|
|
2dd48c6e79 | ||
|
|
6c29c21184 | ||
|
|
85fe251991 | ||
|
|
69861e0c8a | ||
|
|
e440be17ae | ||
|
|
ce52408bbc | ||
|
|
badf7b9a4f | ||
|
|
3e9fe97ba3 | ||
|
|
ec7066b31b | ||
|
|
ec74dc5a33 | ||
|
|
cbdc2c5a7c | ||
|
|
228fbeeda4 | ||
|
|
b0d9825afa | ||
|
|
7509264d73 | ||
|
|
f08725d661 | ||
|
|
f2a9a8d645 | ||
|
|
e799030ae3 | ||
|
|
ec0657f436 | ||
|
|
fce46e26d4 | ||
|
|
e015a409fe | ||
|
|
82ff90ce26 | ||
|
|
63edc627ad | ||
|
|
c2b4287ce1 | ||
|
|
5b61f15f95 | ||
|
|
9c815f2252 | ||
|
|
d16c9696c3 | ||
|
|
14ccda5506 | ||
|
|
a496b99d6e | ||
|
|
882286dce7 | ||
|
|
8aa9ca92e3 | ||
|
|
52dd8f8c14 | ||
|
|
4d28b9074b | ||
|
|
381c19b952 | ||
|
|
50f9255af2 | ||
|
|
a7df3457ad | ||
|
|
647f624554 | ||
|
|
3d3e051f03 | ||
|
|
4c0b2beb63 | ||
|
|
ec44f64465 | ||
|
|
19d4e521a3 | ||
|
|
85a3b7c388 | ||
|
|
26ec719c67 | ||
|
|
66364bb2c9 | ||
|
|
f9f8d7e71e | ||
|
|
bdbd1fb1f0 | ||
|
|
b3112a53f1 | ||
|
|
f1f4e68673 | ||
|
|
9b56445621 | ||
|
|
f5f3d92d3d | ||
|
|
4d074799ca | ||
|
|
d38a2406a7 | ||
|
|
25ccfca835 | ||
|
|
487b6566ee | ||
|
|
14caeb12ad | ||
|
|
cf8fcd0539 | ||
|
|
d8387a351e | ||
|
|
300cd24493 | ||
|
|
fb66d24f89 | ||
|
|
f1fc8c067e | ||
|
|
da1ee05c0a | ||
|
|
57099ecd43 | ||
|
|
8c5b41bbe6 | ||
|
|
7bc716508c | ||
|
|
d82d9765e1 | ||
|
|
74e570c198 | ||
|
|
6adf51083e | ||
|
|
a5be82a7d3 | ||
|
|
83693668ed | ||
|
|
c2929694a6 | ||
|
|
82db9ff213 | ||
|
|
5e853bb589 | ||
|
|
9e1fad3947 | ||
|
|
a4f5a983ba | ||
|
|
08d7520458 | ||
|
|
283de16660 | ||
|
|
5e47ae287b | ||
|
|
e7e155048d | ||
|
|
8197073cf0 | ||
|
|
310111bb8d | ||
|
|
3dd667f3b3 | ||
|
|
e06334cd12 | ||
|
|
8d8b99dc78 | ||
|
|
3418488902 | ||
|
|
b96f6f0920 | ||
|
|
e593f2e258 | ||
|
|
7b6c37ea1f | ||
|
|
4dbeec02c8 | ||
|
|
1b2df99799 | ||
|
|
6d72050e81 | ||
|
|
b97a87a1b4 | ||
|
|
89b0487376 | ||
|
|
0ae53e415c | ||
|
|
915c200c7b | ||
|
|
a4941bd764 | ||
|
|
5123cbae00 | ||
|
|
135f96d507 | ||
|
|
aa08ea9160 | ||
|
|
fb80eea144 | ||
|
|
bebcf1c7d4 | ||
|
|
f39f0ef101 | ||
|
|
f2f4c8397d | ||
|
|
ae4613fa76 | ||
|
|
8b1155123d | ||
|
|
e65dfbb659 | ||
|
|
fe37bdd9c7 | ||
|
|
f449ee1878 | ||
|
|
47b6807471 | ||
|
|
f93708e90f | ||
|
|
5285b76746 | ||
|
|
1a4d8b965a | ||
|
|
11209fe05d | ||
|
|
09c1eec8f3 | ||
|
|
d3373447c3 | ||
|
|
d4e54fe966 | ||
|
|
a5c284cabb | ||
|
|
80bae41df4 | ||
|
|
f5c267144e | ||
|
|
25a33fe58f | ||
|
|
bab12dc99b | ||
|
|
1abb1f16d4 | ||
|
|
7cf843d6f4 | ||
|
|
a8444a6328 | ||
|
|
ca044d3577 | ||
|
|
76bac5d971 | ||
|
|
f68f291b3d | ||
|
|
b108672fad | ||
|
|
377a8f48e2 | ||
|
|
a098d04d64 | ||
|
|
5e4b70bd51 | ||
|
|
9ce931abb4 | ||
|
|
072d9b9850 | ||
|
|
1bb4afaeac | ||
|
|
4dd6102a0f | ||
|
|
4f64377480 | ||
|
|
31856a2f46 | ||
|
|
358391bfde | ||
|
|
7b2c005d9b | ||
|
|
c31ef8a788 | ||
|
|
e1bd004683 | ||
|
|
0cecab530f | ||
|
|
844090f842 | ||
|
|
aa48ad45b7 | ||
|
|
1967e4857b | ||
|
|
21923d6f87 | ||
|
|
a5912ccd89 | ||
|
|
e4252d8cbd | ||
|
|
b01e4cf9ec | ||
|
|
703cfd50b2 | ||
|
|
6a1b765a77 | ||
|
|
b2dc762937 | ||
|
|
498f065dea | ||
|
|
9d8941176b | ||
|
|
4d2a03c0b2 | ||
|
|
e0e2d5c0e6 | ||
|
|
9b97bff7b1 | ||
|
|
f23be1d0ec | ||
|
|
fa595e160c | ||
|
|
4ea5a48f43 | ||
|
|
6dd8a755c8 | ||
|
|
063d38dbd2 | ||
|
|
165c953239 | ||
|
|
a0fae153cf | ||
|
|
bfcf288561 | ||
|
|
560f884cc0 | ||
|
|
d79898848e | ||
|
|
c03d138cd0 | ||
|
|
22d192e7e3 | ||
|
|
a4babd6fc4 | ||
|
|
edd5515bd7 | ||
|
|
00dde2358a | ||
|
|
8e84262a32 | ||
|
|
541696f3f7 | ||
|
|
8051d03f08 | ||
|
|
a78d273aeb | ||
|
|
07bd3563cd | ||
|
|
8c690d1b21 | ||
|
|
a8b4e9cc6d | ||
|
|
30ed9fb75c | ||
|
|
0382d9c1ca | ||
|
|
95381e1892 | ||
|
|
7df1beef85 | ||
|
|
a1e519b352 | ||
|
|
e7f16a8c06 | ||
|
|
a3adae4af0 | ||
|
|
c7c0c76bd3 | ||
|
|
67cc965d31 | ||
|
|
d09969e3b4 | ||
|
|
41904b42f8 | ||
|
|
f638410782 | ||
|
|
48cc7995d7 | ||
|
|
793b93c665 | ||
|
|
e0186cbe2a | ||
|
|
2cc2b5dce8 | ||
|
|
ccdbbdb0ec | ||
|
|
13483321ac | ||
|
|
5547533197 | ||
|
|
c68998d75e | ||
|
|
20f2d3f2f9 | ||
|
|
cc7b35b44a | ||
|
|
67a2cd6a48 | ||
|
|
08deddc4fe | ||
|
|
77b2eb36a5 | ||
|
|
ab84ac207a | ||
|
|
8957d91e01 | ||
|
|
c7cbb729b7 | ||
|
|
eca6fa7958 | ||
|
|
ee535afcb9 | ||
|
|
18b64910d7 | ||
|
|
3ca75140d0 | ||
|
|
960f924448 | ||
|
|
eed128a8b4 |
3
.clomonitor.yml
Normal file
3
.clomonitor.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
exemptions:
|
||||
- check: analytics
|
||||
reason: "We don't track people"
|
||||
@@ -1,50 +0,0 @@
|
||||
# Flagger signed releases
|
||||
|
||||
Flagger releases published to GitHub Container Registry as multi-arch container images
|
||||
are signed using [cosign](https://github.com/sigstore/cosign).
|
||||
|
||||
## Verify Flagger images
|
||||
|
||||
Install the [cosign](https://github.com/sigstore/cosign) CLI:
|
||||
|
||||
```sh
|
||||
brew install sigstore/tap/cosign
|
||||
```
|
||||
|
||||
Verify a Flagger release with cosign CLI:
|
||||
|
||||
```sh
|
||||
cosign verify -key https://raw.githubusercontent.com/fluxcd/flagger/main/cosign/cosign.pub \
|
||||
ghcr.io/fluxcd/flagger:1.13.0
|
||||
```
|
||||
|
||||
Verify Flagger images before they get pulled on your Kubernetes clusters with [Kyverno](https://github.com/kyverno/kyverno/):
|
||||
|
||||
```yaml
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: verify-flagger-image
|
||||
annotations:
|
||||
policies.kyverno.io/title: Verify Flagger Image
|
||||
policies.kyverno.io/category: Cosign
|
||||
policies.kyverno.io/severity: medium
|
||||
policies.kyverno.io/subject: Pod
|
||||
policies.kyverno.io/minversion: 1.4.2
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: verify-image
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
verifyImages:
|
||||
- image: "ghcr.io/fluxcd/flagger:*"
|
||||
key: |-
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEST+BqQ1XZhhVYx0YWQjdUJYIG5Lt
|
||||
iz2+UxRIqmKBqNmce2T+l45qyqOs99qfD7gLNGmkVZ4vtJ9bM7FxChFczg==
|
||||
-----END PUBLIC KEY-----
|
||||
```
|
||||
@@ -1,11 +0,0 @@
|
||||
-----BEGIN ENCRYPTED COSIGN PRIVATE KEY-----
|
||||
eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6
|
||||
OCwicCI6MX0sInNhbHQiOiIvK1MwbTNrU3pGMFFXdVVYQkFoY2gvTDc3NVJBSy9O
|
||||
cnkzUC9iMkxBZGF3PSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94
|
||||
Iiwibm9uY2UiOiJBNEFYL2IyU1BsMDBuY3JUNk45QkNOb0VLZTZLZEluRCJ9LCJj
|
||||
aXBoZXJ0ZXh0IjoiZ054UlJweXpraWtRMUVaRldsSnEvQXVUWTl0Vis2enBlWkIy
|
||||
dUFHREMzOVhUQlAwaWY5YStaZTE1V0NTT2FQZ01XQmtSZWhrQVVjQ3dZOGF2WTZa
|
||||
eFhZWWE3T1B4eFdidHJuSUVZM2hwZUk1M1dVQVZ6SXEzQjl0N0ZmV1JlVGsxdFlo
|
||||
b3hwQmxUSHY4U0c2azdPYk1aQnJleitzSGRWclF6YUdMdG12V1FOMTNZazRNb25i
|
||||
ZUpRSUJpUXFQTFg5NzFhSUlxU0dxYVhCanc9PSJ9
|
||||
-----END ENCRYPTED COSIGN PRIVATE KEY-----
|
||||
@@ -1,4 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEST+BqQ1XZhhVYx0YWQjdUJYIG5Lt
|
||||
iz2+UxRIqmKBqNmce2T+l45qyqOs99qfD7gLNGmkVZ4vtJ9bM7FxChFczg==
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -16,3 +16,4 @@ redirects:
|
||||
usage/osm-progressive-delivery: tutorials/osm-progressive-delivery.md
|
||||
usage/kuma-progressive-delivery: tutorials/kuma-progressive-delivery.md
|
||||
usage/gatewayapi-progressive-delivery: tutorials/gatewayapi-progressive-delivery.md
|
||||
usage/apisix-progressive-delivery: tutorials/apisix-progressive-delivery.md
|
||||
|
||||
7
.github/dependabot.yml
vendored
Normal file
7
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
version: 2
|
||||
|
||||
updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
16
.github/workflows/build.yaml
vendored
16
.github/workflows/build.yaml
vendored
@@ -10,31 +10,33 @@ on:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
container:
|
||||
build-flagger:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Restore Go cache
|
||||
uses: actions/cache@v1
|
||||
uses: actions/cache@v3.3.1
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: 1.17.x
|
||||
go-version: 1.19.x
|
||||
- name: Download modules
|
||||
run: |
|
||||
go mod download
|
||||
go install golang.org/x/tools/cmd/goimports
|
||||
- name: Run linters
|
||||
run: make test-fmt test-codegen
|
||||
- name: Verify CRDs
|
||||
run: make verify-crd
|
||||
- name: Run tests
|
||||
run: go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
|
||||
- name: Check if working tree is dirty
|
||||
@@ -45,7 +47,7 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v1
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
- name: Build container image
|
||||
|
||||
16
.github/workflows/e2e.yaml
vendored
16
.github/workflows/e2e.yaml
vendored
@@ -10,10 +10,10 @@ on:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
kind:
|
||||
e2e-test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -22,7 +22,6 @@ jobs:
|
||||
# service mesh
|
||||
- istio
|
||||
- linkerd
|
||||
- osm
|
||||
- kuma
|
||||
# ingress controllers
|
||||
- contour
|
||||
@@ -32,14 +31,17 @@ jobs:
|
||||
- skipper
|
||||
- kubernetes
|
||||
- gatewayapi
|
||||
- keda
|
||||
- apisix
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Setup Kubernetes
|
||||
uses: engineerd/setup-kind@v0.5.0
|
||||
uses: helm/kind-action@v1.5.0
|
||||
with:
|
||||
version: "v0.11.1"
|
||||
image: kindest/node:v1.21.1@sha256:fae9a58f17f18f06aeac9772ca8b5ac680ebbed985e266f711d936e91d113bad
|
||||
version: v0.18.0
|
||||
cluster_name: kind
|
||||
node_image: kindest/node:v1.24.12@sha256:0bdca26bd7fe65c823640b14253ea7bac4baad9336b332c94850f84d8102f873
|
||||
- name: Build container image
|
||||
run: |
|
||||
docker build -t test/flagger:latest .
|
||||
|
||||
10
.github/workflows/helm.yaml
vendored
10
.github/workflows/helm.yaml
vendored
@@ -4,15 +4,17 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write # needed to push chart
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-push:
|
||||
release-charts:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- name: Publish Helm charts
|
||||
uses: stefanprodan/helm-gh-pages@v1.3.0
|
||||
uses: stefanprodan/helm-gh-pages@v1.7.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
charts_url: https://flagger.app
|
||||
|
||||
31
.github/workflows/push-ld.yml
vendored
31
.github/workflows/push-ld.yml
vendored
@@ -6,41 +6,44 @@ env:
|
||||
IMAGE: "ghcr.io/fluxcd/flagger-loadtester"
|
||||
|
||||
permissions:
|
||||
contents: write # needed to write releases
|
||||
packages: write # needed for ghcr access
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-push:
|
||||
release-load-tester:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: sigstore/cosign-installer@v2.8.1
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
VERSION=$(grep 'VERSION' cmd/loadtester/main.go | head -1 | awk '{ print $4 }' | tr -d '"')
|
||||
echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
echo ::set-output name=VERSION::${VERSION}
|
||||
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
- name: Setup QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Setup Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: fluxcdbot
|
||||
password: ${{ secrets.GHCR_TOKEN }}
|
||||
- name: Generate image meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v3
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: |
|
||||
${{ env.IMAGE }}
|
||||
tags: |
|
||||
type=raw,value=${{ steps.prep.outputs.VERSION }}
|
||||
- name: Publish image
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
push: true
|
||||
builder: ${{ steps.buildx.outputs.name }}
|
||||
@@ -51,6 +54,8 @@ jobs:
|
||||
REVISION=${{ github.sha }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
- name: Check images
|
||||
- name: Sign image
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
cosign sign ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
|
||||
84
.github/workflows/release.yml
vendored
84
.github/workflows/release.yml
vendored
@@ -3,51 +3,66 @@ on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'image tag prefix'
|
||||
default: 'rc'
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: write # needed to write releases
|
||||
id-token: write # needed for keyless signing
|
||||
packages: write # needed for ghcr access
|
||||
contents: read
|
||||
|
||||
env:
|
||||
IMAGE: "ghcr.io/fluxcd/${{ github.event.repository.name }}"
|
||||
|
||||
jobs:
|
||||
build-push:
|
||||
release-flagger:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write # needed to write releases
|
||||
id-token: write # needed for keyless signing
|
||||
packages: write # needed for ghcr access
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: sigstore/cosign-installer@main
|
||||
- uses: actions/checkout@v3
|
||||
- uses: fluxcd/flux2/action@main
|
||||
- uses: sigstore/cosign-installer@v2.8.1
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
|
||||
if [[ ${GITHUB_EVENT_NAME} = "workflow_dispatch" ]]; then
|
||||
VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}"
|
||||
else
|
||||
VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
|
||||
fi
|
||||
CHANGELOG="https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
|
||||
echo "[CHANGELOG](${CHANGELOG})" > notes.md
|
||||
echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
echo ::set-output name=VERSION::${VERSION}
|
||||
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
|
||||
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
|
||||
- name: Setup QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Setup Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: fluxcdbot
|
||||
password: ${{ secrets.GHCR_TOKEN }}
|
||||
- name: Generate image meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v3
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: |
|
||||
${{ env.IMAGE }}
|
||||
tags: |
|
||||
type=raw,value=${{ steps.prep.outputs.VERSION }}
|
||||
- name: Publish image
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
sbom: true
|
||||
provenance: true
|
||||
push: true
|
||||
builder: ${{ steps.buildx.outputs.name }}
|
||||
context: .
|
||||
@@ -58,26 +73,43 @@ jobs:
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
- name: Sign image
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
run: |
|
||||
echo -n "${{secrets.COSIGN_PASSWORD}}" | \
|
||||
cosign sign -key ./.cosign/cosign.key -a git_sha=$GITHUB_SHA \
|
||||
${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
- name: Check images
|
||||
run: |
|
||||
docker buildx imagetools inspect ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
- name: Verifiy image signature
|
||||
run: |
|
||||
cosign verify -key ./.cosign/cosign.pub \
|
||||
${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
cosign sign ${{ env.IMAGE }}:${{ steps.prep.outputs.VERSION }}
|
||||
- name: Publish Helm charts
|
||||
uses: stefanprodan/helm-gh-pages@v1.3.0
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
uses: stefanprodan/helm-gh-pages@v1.7.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
charts_url: https://flagger.app
|
||||
linting: off
|
||||
- uses: fluxcd/pkg/actions/helm@main
|
||||
with:
|
||||
version: 3.10.1
|
||||
- name: Publish signed Helm chart to GHCR
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
run: |
|
||||
helm package charts/flagger
|
||||
helm push flagger-${{ steps.prep.outputs.VERSION }}.tgz oci://ghcr.io/fluxcd/charts
|
||||
cosign sign ghcr.io/fluxcd/charts/flagger:${{ steps.prep.outputs.VERSION }}
|
||||
rm flagger-${{ steps.prep.outputs.VERSION }}.tgz
|
||||
- name: Publish signed manifests to GHCR
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
run: |
|
||||
flux push artifact oci://ghcr.io/fluxcd/flagger-manifests:${{ steps.prep.outputs.VERSION }} \
|
||||
--path="./kustomize" \
|
||||
--source="$(git config --get remote.origin.url)" \
|
||||
--revision="${{ steps.prep.outputs.VERSION }}/$(git rev-parse HEAD)"
|
||||
cosign sign ghcr.io/fluxcd/flagger-manifests:${{ steps.prep.outputs.VERSION }}
|
||||
- uses: anchore/sbom-action/download-syft@v0
|
||||
- name: Create release and SBOM
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
uses: goreleaser/goreleaser-action@v4
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
with:
|
||||
version: latest
|
||||
args: release --release-notes=notes.md --rm-dist --skip-validate
|
||||
|
||||
26
.github/workflows/scan.yml
vendored
26
.github/workflows/scan.yml
vendored
@@ -9,33 +9,33 @@ on:
|
||||
- cron: '18 10 * * 3'
|
||||
|
||||
permissions:
|
||||
contents: read # for actions/checkout to fetch code
|
||||
security-events: write # for codeQL to write security events
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
fossa:
|
||||
name: FOSSA
|
||||
scan-fossa:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run FOSSA scan and upload build data
|
||||
uses: fossa-contrib/fossa-action@v1
|
||||
uses: fossa-contrib/fossa-action@v2
|
||||
with:
|
||||
# FOSSA Push-Only API Token
|
||||
fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
|
||||
github-token: ${{ github.token }}
|
||||
|
||||
codeql:
|
||||
name: CodeQL
|
||||
scan-codeql:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: go
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
||||
@@ -15,3 +15,16 @@ sboms:
|
||||
artifacts: source
|
||||
documents:
|
||||
- "{{ .ProjectName }}_{{ .Version }}_sbom.spdx.json"
|
||||
|
||||
signs:
|
||||
- cmd: cosign
|
||||
env:
|
||||
- COSIGN_EXPERIMENTAL=1
|
||||
certificate: '${artifact}.pem'
|
||||
args:
|
||||
- sign-blob
|
||||
- '--output-certificate=${certificate}'
|
||||
- '--output-signature=${signature}'
|
||||
- '${artifact}'
|
||||
artifacts: checksum
|
||||
output: true
|
||||
|
||||
482
CHANGELOG.md
482
CHANGELOG.md
@@ -2,6 +2,488 @@
|
||||
|
||||
All notable changes to this project are documented in this file.
|
||||
|
||||
## 1.31.0
|
||||
|
||||
**Release date:** 2023-05-10
|
||||
|
||||
⚠️ __Breaking Changes__
|
||||
|
||||
This release adds support for Linkerd 2.12 and later. Due to changes in Linkerd
|
||||
the default namespace for Flagger's installation had to be changed from
|
||||
`linkerd` to `flagger-system` and the `flagger` Deployment is now injected with
|
||||
the Linkerd proxy. Furthermore, installing Flagger for Linkerd will result in
|
||||
the creation of an `AuthorizationPolicy` that allows access to the Prometheus
|
||||
instance in the `linkerd-viz` namespace. To upgrade your Flagger installation,
|
||||
please see the below migration guide.
|
||||
|
||||
If you use Kustomize, then follow these steps:
|
||||
* `kubectl delete -n linkerd deploy/flagger`
|
||||
* `kubectl delete -n linkerd serviceaccount flagger`
|
||||
* If you're on Linkerd >= 2.12, you'll need to install the SMI extension to enable
|
||||
support for `TrafficSplit`s:
|
||||
```bash
|
||||
curl -sL https://linkerd.github.io/linkerd-smi/install | sh
|
||||
linkerd smi install | kubectl apply -f -
|
||||
```
|
||||
* `kubectl apply -k github.com/fluxcd/flagger//kustomize/linkerd`
|
||||
|
||||
Note: If you're on Linkerd < 2.12, this will report an error about missing CRDs.
|
||||
It is safe to ignore this error.
|
||||
|
||||
If you use Helm and are on Linkerd < 2.12, then you can use `helm upgrade` to do
|
||||
a regular upgrade.
|
||||
|
||||
If you use Helm and are on Linkerd >= 2.12, then follow these steps:
|
||||
* `helm uninstall flagger -n linkerd`
|
||||
* Install the Linkerd SMI extension:
|
||||
```bash
|
||||
helm repo add l5d-smi https://linkerd.github.io/linkerd-smi
|
||||
helm install linkerd-smi l5d-smi/linkerd-smi -n linkerd-smi --create-namespace
|
||||
```
|
||||
* Install Flagger in the `flagger-system` namespace
|
||||
and create an `AuthorizationPolicy`:
|
||||
```bash
|
||||
helm repo update flagger
|
||||
helm install flagger flagger/flagger \
|
||||
--namespace flagger-system \
|
||||
--set meshProvider=linkerd \
|
||||
--set metricsServer=http://prometheus.linkerd-viz:9090 \
|
||||
--set linkerdAuthPolicy.create=true
|
||||
```
|
||||
|
||||
Furthermore, a bug which led the `confirm-rollout` webhook to be executed at
|
||||
every step of the Canary instead of only being executed before the canary
|
||||
Deployment is scaled up, has been fixed.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- Add support for Linkerd 2.13
|
||||
[#1417](https://github.com/fluxcd/flagger/pull/1417)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- Fix the loadtester install with flux documentation
|
||||
[#1384](https://github.com/fluxcd/flagger/pull/1384)
|
||||
- Run `confirm-rollout` checks only before scaling up deployment
|
||||
[#1414](https://github.com/fluxcd/flagger/pull/1414)
|
||||
- e2e: Remove OSM tests
|
||||
[#1423](https://github.com/fluxcd/flagger/pull/1423)
|
||||
|
||||
## 1.30.0
|
||||
|
||||
**Release date:** 2023-04-12
|
||||
|
||||
This release fixes a bug related to the lack of updates to the generated
|
||||
object's metadata according to the metadata specified in `spec.service.apex`.
|
||||
Furthermore, a bug where labels were wrongfully copied over from the canary
|
||||
deployment to primary deployment when no value was provided for
|
||||
`--include-label-prefix` has been fixed.
|
||||
This release also makes Flagger compatible with Flux's helm-controller drift
|
||||
detection.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- build(deps): bump actions/cache from 3.2.5 to 3.3.1
|
||||
[#1385](https://github.com/fluxcd/flagger/pull/1385)
|
||||
- helm: Added the option to supply additional volumes
|
||||
[#1393](https://github.com/fluxcd/flagger/pull/1393)
|
||||
- build(deps): bump actions/setup-go from 3 to 4
|
||||
[#1394](https://github.com/fluxcd/flagger/pull/1394)
|
||||
- update Kuma version and docs
|
||||
[#1402](https://github.com/fluxcd/flagger/pull/1402)
|
||||
- ci: bump k8s to 1.24 and kind to 1.18
|
||||
[#1406](https://github.com/fluxcd/flagger/pull/1406)
|
||||
- Helm: Allow configuring deployment `annotations`
|
||||
[#1411](https://github.com/fluxcd/flagger/pull/1411)
|
||||
- update dependencies
|
||||
[#1412](https://github.com/fluxcd/flagger/pull/1412)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- Enable updates for labels and annotations
|
||||
[#1392](https://github.com/fluxcd/flagger/pull/1392)
|
||||
- Update flagger-install-with-flux.md
|
||||
[#1398](https://github.com/fluxcd/flagger/pull/1398)
|
||||
- avoid copying canary labels to primary on promotion
|
||||
[#1405](https://github.com/fluxcd/flagger/pull/1405)
|
||||
- Disable Flux helm drift detection for managed resources
|
||||
[#1408](https://github.com/fluxcd/flagger/pull/1408)
|
||||
|
||||
## 1.29.0
|
||||
|
||||
**Release date:** 2023-02-21
|
||||
|
||||
This release comes with support for template variables for analysis metrics.
|
||||
A canary analysis metric can reference a set of custom variables with
|
||||
`.spec.analysis.metrics[].templateVariables`. For more info see the [docs](https://fluxcd.io/flagger/usage/metrics/#custom-metrics).
|
||||
Furthermore, a bug related to Canary releases with session affinity has been
|
||||
fixed.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- update dependencies
|
||||
[#1374](https://github.com/fluxcd/flagger/pull/1374)
|
||||
- build(deps): bump golang.org/x/net from 0.4.0 to 0.7.0
|
||||
[#1373](https://github.com/fluxcd/flagger/pull/1373)
|
||||
- build(deps): bump fossa-contrib/fossa-action from 1 to 2
|
||||
[#1372](https://github.com/fluxcd/flagger/pull/1372)
|
||||
- Allow custom affinities for flagger deployment in helm chart
|
||||
[#1371](https://github.com/fluxcd/flagger/pull/1371)
|
||||
- Add namespace to namespaced resources in helm chart
|
||||
[#1370](https://github.com/fluxcd/flagger/pull/1370)
|
||||
- build(deps): bump actions/cache from 3.2.4 to 3.2.5
|
||||
[#1366](https://github.com/fluxcd/flagger/pull/1366)
|
||||
- build(deps): bump actions/cache from 3.2.3 to 3.2.4
|
||||
[#1362](https://github.com/fluxcd/flagger/pull/1362)
|
||||
- build(deps): bump docker/build-push-action from 3 to 4
|
||||
[#1361](https://github.com/fluxcd/flagger/pull/1361)
|
||||
- modify release workflow to publish rc images
|
||||
[#1359](https://github.com/fluxcd/flagger/pull/1359)
|
||||
- build: Enable SBOM and SLSA Provenance
|
||||
[#1356](https://github.com/fluxcd/flagger/pull/1356)
|
||||
- Add support for custom variables in metric templates
|
||||
[#1355](https://github.com/fluxcd/flagger/pull/1355)
|
||||
- docs(readme.md): add additional tutorial
|
||||
[#1346](https://github.com/fluxcd/flagger/pull/1346)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- use regex to match against headers in istio
|
||||
[#1364](https://github.com/fluxcd/flagger/pull/1364)
|
||||
|
||||
## 1.28.0
|
||||
|
||||
**Release date:** 2023-01-26
|
||||
|
||||
This release comes with support for setting a different autoscaling
|
||||
configuration for the primary workload.
|
||||
The `.spec.autoscalerRef.primaryScalerReplicas` is useful in the
|
||||
situation where the user does not want to scale the canary workload
|
||||
to the exact same size as the primary, especially when opting for a
|
||||
canary deployment pattern where only a small portion of traffic is
|
||||
routed to the canary workload pods.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- Support for overriding primary scaler replicas
|
||||
[#1343](https://github.com/fluxcd/flagger/pull/1343)
|
||||
- Allow access to Prometheus in OpenShift via SA token
|
||||
[#1338](https://github.com/fluxcd/flagger/pull/1338)
|
||||
- Update Kubernetes packages to v1.26.1
|
||||
[#1352](https://github.com/fluxcd/flagger/pull/1352)
|
||||
|
||||
## 1.27.0
|
||||
|
||||
**Release date:** 2022-12-15
|
||||
|
||||
This release comes with support for Apache APISIX. For more details see the
|
||||
[tutorial](https://fluxcd.io/flagger/tutorials/apisix-progressive-delivery).
|
||||
|
||||
#### Improvements
|
||||
|
||||
- [apisix] Implement router interface and observer interface
|
||||
[#1281](https://github.com/fluxcd/flagger/pull/1281)
|
||||
- Bump stefanprodan/helm-gh-pages from 1.6.0 to 1.7.0
|
||||
[#1326](https://github.com/fluxcd/flagger/pull/1326)
|
||||
- Release loadtester v0.28.0
|
||||
[#1328](https://github.com/fluxcd/flagger/pull/1328)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- Update release docs
|
||||
[#1324](https://github.com/fluxcd/flagger/pull/1324)
|
||||
|
||||
## 1.26.0
|
||||
|
||||
**Release date:** 2022-11-23
|
||||
|
||||
This release comes with support for Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io/) v1beta1.
|
||||
For more details see the [Gateway API Progressive Delivery tutorial](https://docs.flagger.app/tutorials/gatewayapi-progressive-delivery).
|
||||
|
||||
Please note that starting with this version, the Gateway API v1alpha2 is considered deprecated
|
||||
and will be removed from Flagger after 6 months.
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- Updated Gateway API from v1alpha2 to v1beta1
|
||||
[#1319](https://github.com/fluxcd/flagger/pull/1319)
|
||||
- Updated Gateway API docs to v1beta1
|
||||
[#1321](https://github.com/fluxcd/flagger/pull/1321)
|
||||
- Update dependencies
|
||||
[#1322](https://github.com/fluxcd/flagger/pull/1322)
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- docs: Add `linkerd install --crds` to Linkerd tutorial
|
||||
[#1316](https://github.com/fluxcd/flagger/pull/1316)
|
||||
|
||||
## 1.25.0
|
||||
|
||||
**Release date:** 2022-11-16
|
||||
|
||||
This release introduces a new deployment strategy combining Canary releases with session affinity
|
||||
for Istio.
|
||||
|
||||
Furthermore, it contains a regression fix regarding metadata in alerts introduced in
|
||||
[#1275](https://github.com/fluxcd/flagger/pull/1275)
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- Add support for session affinity during weighted routing with Istio
|
||||
[#1280](https://github.com/fluxcd/flagger/pull/1280)
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- Fix cluster name inclusion in alerts metadata
|
||||
[#1306](https://github.com/fluxcd/flagger/pull/1306)
|
||||
- fix(faq): Update FAQ about zero downtime with correct values
|
||||
[#1302](https://github.com/fluxcd/flagger/pull/1302)
|
||||
|
||||
## 1.24.1
|
||||
|
||||
**Release date:** 2022-10-26
|
||||
|
||||
This release comes with a fix to Gloo routing when a custom service name is used.
|
||||
|
||||
In addition, the Gloo ingress end-to-end testing was updated to Gloo Helm chart v1.12.31.
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- fix(gloo): Use correct route table name in case service name was overwritten
|
||||
[#1300](https://github.com/fluxcd/flagger/pull/1300)
|
||||
|
||||
## 1.24.0
|
||||
|
||||
**Release date:** 2022-10-23
|
||||
|
||||
Starting with this version, the Flagger release artifacts are published to
|
||||
GitHub Container Registry, and they are signed with Cosign and GitHub OIDC.
|
||||
|
||||
OCI artifacts:
|
||||
|
||||
- `ghcr.io/fluxcd/flagger:<version>` multi-arch container images
|
||||
- `ghcr.io/fluxcd/flagger-manifest:<version>` Kubernetes manifests
|
||||
- `ghcr.io/fluxcd/charts/flagger:<version>` Helm charts
|
||||
|
||||
To verify an OCI artifact with Cosign:
|
||||
|
||||
```shell
|
||||
export COSIGN_EXPERIMENTAL=1
|
||||
cosign verify ghcr.io/fluxcd/flagger:1.24.0
|
||||
cosign verify ghcr.io/fluxcd/flagger-manifests:1.24.0
|
||||
cosign verify ghcr.io/fluxcd/charts/flagger:1.24.0
|
||||
```
|
||||
|
||||
To deploy Flagger from its OCI artifacts the GitOps way,
|
||||
please see the [Flux installation guide](docs/gitbook/install/flagger-install-with-flux.md).
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- docs: Add guide on how to install Flagger with Flux OCI
|
||||
[#1294](https://github.com/fluxcd/flagger/pull/1294)
|
||||
- ci: Publish signed Helm charts and manifests to GHCR
|
||||
[#1293](https://github.com/fluxcd/flagger/pull/1293)
|
||||
- ci: Sign release and containers with Cosign and GitHub OIDC
|
||||
[#1292](https://github.com/fluxcd/flagger/pull/1292)
|
||||
- ci: Adjust GitHub workflow permissions
|
||||
[#1286](https://github.com/fluxcd/flagger/pull/1286)
|
||||
- docs: Add link to Flux governance document
|
||||
[#1286](https://github.com/fluxcd/flagger/pull/1286)
|
||||
|
||||
## 1.23.0
|
||||
|
||||
**Release date:** 2022-10-20
|
||||
|
||||
This release comes with support for Slack bot token authentication.
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- alerts: Add support for Slack bot token authentication
|
||||
[#1270](https://github.com/fluxcd/flagger/pull/1270)
|
||||
- loadtester: logCmdOutput to logger instead of stdout
|
||||
[#1267](https://github.com/fluxcd/flagger/pull/1267)
|
||||
- helm: Add app.kubernetes.io/version label to chart
|
||||
[#1264](https://github.com/fluxcd/flagger/pull/1264)
|
||||
- Update Go to 1.19
|
||||
[#1264](https://github.com/fluxcd/flagger/pull/1264)
|
||||
- Update Kubernetes packages to v1.25.3
|
||||
[#1283](https://github.com/fluxcd/flagger/pull/1283)
|
||||
- Bump Contour to v1.22 in e2e tests
|
||||
[#1282](https://github.com/fluxcd/flagger/pull/1282)
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- gatewayapi: Fix reconciliation of nil hostnames
|
||||
[#1276](https://github.com/fluxcd/flagger/pull/1276)
|
||||
- alerts: Include cluster name in all alerts
|
||||
[#1275](https://github.com/fluxcd/flagger/pull/1275)
|
||||
|
||||
## 1.22.2
|
||||
|
||||
**Release date:** 2022-08-29
|
||||
|
||||
This release fixes a bug related to scaling up the canary deployment when a
|
||||
reference to an autoscaler is specified.
|
||||
|
||||
Furthermore, it contains updates to packages used by the project, including
|
||||
updates to Helm and grpc-health-probe used in the loadtester.
|
||||
|
||||
CVEs fixed (originating from dependencies):
|
||||
* CVE-2022-37434
|
||||
* CVE-2022-27191
|
||||
* CVE-2021-33194
|
||||
* CVE-2021-44716
|
||||
* CVE-2022-29526
|
||||
* CVE-2022-1996
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- If HPA is set, it uses HPA minReplicas when scaling up the canary
|
||||
[#1253](https://github.com/fluxcd/flagger/pull/1253)
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- Release loadtester v0.23.0
|
||||
[#1246](https://github.com/fluxcd/flagger/pull/1246)
|
||||
- Add target and script to keep crds in sync
|
||||
[#1254](https://github.com/fluxcd/flagger/pull/1254)
|
||||
- docs: add knative support to roadmap
|
||||
[#1258](https://github.com/fluxcd/flagger/pull/1258)
|
||||
- Update dependencies
|
||||
[#1259](https://github.com/fluxcd/flagger/pull/1259)
|
||||
- Release loadtester v0.24.0
|
||||
[#1261](https://github.com/fluxcd/flagger/pull/1261)
|
||||
|
||||
## 1.22.1
|
||||
|
||||
**Release date:** 2022-08-01
|
||||
|
||||
This minor release fixes a bug related to the use of HPA v2beta2 and updates
|
||||
the KEDA ScaledObject API to include `MetricType` for `ScaleTriggers`.
|
||||
|
||||
Furthermore, the project has been updated to use Go 1.18 and Alpine 3.16.
|
||||
|
||||
#### Fixes:
|
||||
|
||||
- Update KEDA ScaledObject API to include MetricType for Triggers
|
||||
[#1241](https://github.com/fluxcd/flagger/pull/1241)
|
||||
- Fix fallback logic for HPAv2 to v2beta2
|
||||
[#1242](https://github.com/fluxcd/flagger/pull/1242)
|
||||
|
||||
#### Improvements:
|
||||
- Update Go to 1.18 and Alpine to 3.16
|
||||
[#1243](https://github.com/fluxcd/flagger/pull/1243)
|
||||
- Clarify HPA API requirement
|
||||
[#1239](https://github.com/fluxcd/flagger/pull/1239)
|
||||
- Update README
|
||||
[#1233](https://github.com/fluxcd/flagger/pull/1233)
|
||||
|
||||
## 1.22.0
|
||||
|
||||
**Release date:** 2022-07-11
|
||||
|
||||
This release comes with support for KEDA ScaledObjects as an alternative to HPAs. Check the
|
||||
[tutorial](https://docs.flagger.app/tutorials/keda-scaledobject) to understand its usage
|
||||
with Flagger.
|
||||
|
||||
The `.spec.service.appProtocol` field can now be used to specify the [`appProtocol`](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol)
|
||||
of the services that Flagger generates.
|
||||
|
||||
In addition, a bug related to the Contour prometheus query for when service name is overwritten
|
||||
along with a bug related to a Contour `HTTPProxy` annotations have been fixed.
|
||||
|
||||
Furthermore, the installation guide for Alibaba ServiceMesh has been updated.
|
||||
|
||||
#### Improvements:
|
||||
|
||||
- feat: Add an optional `appProtocol` field to `spec.service`
|
||||
[#1185](https://github.com/fluxcd/flagger/pull/1185)
|
||||
- Update Kubernetes packages to v1.24.1
|
||||
[#1208](https://github.com/fluxcd/flagger/pull/1208)
|
||||
- charts: Add namespace parameter to parameters table
|
||||
[#1210](https://github.com/fluxcd/flagger/pull/1210)
|
||||
- Introduce `ScalerReconciler` and refactor HPA reconciliation
|
||||
[#1211](https://github.com/fluxcd/flagger/pull/1211)
|
||||
- e2e: Update providers and Kubernetes to v1.23
|
||||
[#1212](https://github.com/fluxcd/flagger/pull/1212)
|
||||
- Add support for KEDA ScaledObjects as an auto scaler
|
||||
[#1216](https://github.com/fluxcd/flagger/pull/1216)
|
||||
- include Contour retryOn in the sample canary
|
||||
[#1223](https://github.com/fluxcd/flagger/pull/1223)
|
||||
|
||||
#### Fixes:
|
||||
- fix contour prom query for when service name is overwritten
|
||||
[#1204](https://github.com/fluxcd/flagger/pull/1204)
|
||||
- fix contour httproxy annotations overwrite
|
||||
[#1205](https://github.com/fluxcd/flagger/pull/1205)
|
||||
- Fix primary HPA label reconciliation
|
||||
[#1215](https://github.com/fluxcd/flagger/pull/1215)
|
||||
- fix: add finalizers to canaries
|
||||
[#1219](https://github.com/fluxcd/flagger/pull/1219)
|
||||
- typo: boostrap -> bootstrap
|
||||
[#1220](https://github.com/fluxcd/flagger/pull/1220)
|
||||
- typo: controller
|
||||
[#1221](https://github.com/fluxcd/flagger/pull/1221)
|
||||
- update guide for flagger on aliyun ASM
|
||||
[#1222](https://github.com/fluxcd/flagger/pull/1222)
|
||||
- Reintroducing empty check for metric template references.
|
||||
[#1224](https://github.com/fluxcd/flagger/pull/1224)
|
||||
|
||||
|
||||
## 1.21.0
|
||||
|
||||
**Release date:** 2022-05-06
|
||||
|
||||
This release comes with an option to disable cross-namespace references to Kubernetes
|
||||
custom resources such as `AlertProviders` and `MetricProviders`. When running Flagger
|
||||
on multi-tenant environments it is advised to set the `-no-cross-namespace-refs=true` flag.
|
||||
|
||||
In addition, this version enables Flagger to target Istio and Kuma multi-cluster setups.
|
||||
When installing Flagger with Helm, the service mesh control plane kubeconfig secret
|
||||
can be specified using `--set controlplane.kubeconfig.secretName`.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- Add flag to disable cross namespace refs to custom resources
|
||||
[#1181](https://github.com/fluxcd/flagger/pull/1181)
|
||||
- Rename kubeconfig section in helm values
|
||||
[#1188](https://github.com/fluxcd/flagger/pull/1188)
|
||||
- Update Flagger overview diagram
|
||||
[#1187](https://github.com/fluxcd/flagger/pull/1187)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- Avoid setting owner refs if the service mesh/ingress is on a different cluster
|
||||
[#1183](https://github.com/fluxcd/flagger/pull/1183)
|
||||
|
||||
## 1.20.0
|
||||
|
||||
**Release date:** 2022-04-15
|
||||
|
||||
This release comes with improvements to the AppMesh, Contour and Istio integrations.
|
||||
|
||||
#### Improvements
|
||||
|
||||
- AppMesh: Add annotation to enable Envoy access logs
|
||||
[#1156](https://github.com/fluxcd/flagger/pull/1156)
|
||||
- Contour: Update the httproxy API and enable RetryOn
|
||||
[#1164](https://github.com/fluxcd/flagger/pull/1164)
|
||||
- Istio: Add destination port when port discovery and delegation are true
|
||||
[#1145](https://github.com/fluxcd/flagger/pull/1145)
|
||||
- Metrics: Add canary analysis result as Prometheus metrics
|
||||
[#1148](https://github.com/fluxcd/flagger/pull/1148)
|
||||
|
||||
#### Fixes
|
||||
|
||||
- Fix canary rollback behaviour
|
||||
[#1171](https://github.com/fluxcd/flagger/pull/1171)
|
||||
- Shorten the metric analysis cycle after confirm promotion gate is open
|
||||
[#1139](https://github.com/fluxcd/flagger/pull/1139)
|
||||
- Fix unit of time in the Istio Grafana dashboard
|
||||
[#1162](https://github.com/fluxcd/flagger/pull/1162)
|
||||
- Fix the service toggle condition in the podinfo helm chart
|
||||
[#1146](https://github.com/fluxcd/flagger/pull/1146)
|
||||
|
||||
## 1.19.0
|
||||
|
||||
**Release date:** 2022-03-14
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.17-alpine as builder
|
||||
FROM golang:1.19-alpine as builder
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG REVISON
|
||||
@@ -21,7 +21,7 @@ RUN CGO_ENABLED=0 go build \
|
||||
-ldflags "-s -w -X github.com/fluxcd/flagger/pkg/version.REVISION=${REVISON}" \
|
||||
-a -o flagger ./cmd/flagger
|
||||
|
||||
FROM alpine:3.15
|
||||
FROM alpine:3.17
|
||||
|
||||
RUN apk --no-cache add ca-certificates
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.17-alpine as builder
|
||||
FROM golang:1.19-alpine as builder
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG TARGETARCH
|
||||
@@ -6,15 +6,15 @@ ARG REVISION
|
||||
|
||||
RUN apk --no-cache add alpine-sdk perl curl bash tar
|
||||
|
||||
RUN HELM3_VERSION=3.7.2 && \
|
||||
RUN HELM3_VERSION=3.11.0 && \
|
||||
curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-${TARGETARCH}.tar.gz" | tar xvz && \
|
||||
chmod +x linux-${TARGETARCH}/helm && mv linux-${TARGETARCH}/helm /usr/local/bin/helm
|
||||
|
||||
RUN GRPC_HEALTH_PROBE_VERSION=v0.4.6 && \
|
||||
RUN GRPC_HEALTH_PROBE_VERSION=v0.4.12 && \
|
||||
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-${TARGETARCH} && \
|
||||
chmod +x /usr/local/bin/grpc_health_probe
|
||||
|
||||
RUN GHZ_VERSION=0.105.0 && \
|
||||
RUN GHZ_VERSION=0.109.0 && \
|
||||
curl -sSL "https://github.com/bojand/ghz/archive/refs/tags/v${GHZ_VERSION}.tar.gz" | tar xz -C /tmp && \
|
||||
cd /tmp/ghz-${GHZ_VERSION}/cmd/ghz && GOARCH=$TARGETARCH go build . && mv ghz /usr/local/bin && \
|
||||
chmod +x /usr/local/bin/ghz
|
||||
|
||||
5
GOVERNANCE.md
Normal file
5
GOVERNANCE.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Flagger Governance
|
||||
|
||||
The Flagger project is governed by the [Flux governance document](https://github.com/fluxcd/community/blob/main/GOVERNANCE.md),
|
||||
involvement is defined in the [Flux community roles document](https://github.com/fluxcd/community/blob/main/community-roles.md),
|
||||
and processes can be found in the [Flux process document](https://github.com/fluxcd/community/blob/main/PROCESS.md).
|
||||
@@ -6,3 +6,4 @@ In alphabetical order:
|
||||
|
||||
Stefan Prodan, Weaveworks <stefan@weave.works> (github: @stefanprodan, slack: stefanprodan)
|
||||
Takeshi Yoneda, Tetrate <takeshi@tetrate.io> (github: @mathetake, slack: mathetake)
|
||||
Sanskar Jaiswal, Weaveworks <sanskar.jaiswal@weave.works> (github: @aryan9600, slack: aryan9600)
|
||||
|
||||
9
Makefile
9
Makefile
@@ -5,6 +5,12 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }'
|
||||
build:
|
||||
CGO_ENABLED=0 go build -a -o ./bin/flagger ./cmd/flagger
|
||||
|
||||
tidy:
|
||||
rm -f go.sum; go mod tidy -compat=1.19
|
||||
|
||||
vet:
|
||||
go vet ./...
|
||||
|
||||
fmt:
|
||||
go mod tidy
|
||||
gofmt -l -s -w ./
|
||||
@@ -27,6 +33,9 @@ crd:
|
||||
cat artifacts/flagger/crd.yaml > charts/flagger/crds/crd.yaml
|
||||
cat artifacts/flagger/crd.yaml > kustomize/base/flagger/crd.yaml
|
||||
|
||||
verify-crd:
|
||||
./hack/verify-crd.sh
|
||||
|
||||
version-set:
|
||||
@next="$(TAG)" && \
|
||||
current="$(VERSION)" && \
|
||||
|
||||
148
README.md
148
README.md
@@ -1,10 +1,11 @@
|
||||
# flagger
|
||||
# flagger
|
||||
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4783)
|
||||
[](https://github.com/fluxcd/flagger/actions)
|
||||
[](https://goreportcard.com/report/github.com/fluxcd/flagger)
|
||||
[](https://github.com/fluxcd/flagger/blob/main/LICENSE)
|
||||
[](https://github.com/fluxcd/flagger/releases)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4783)
|
||||
[](https://goreportcard.com/report/github.com/fluxcd/flagger)
|
||||
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Ffluxcd%2Fflagger?ref=badge_shield)
|
||||
[](https://artifacthub.io/packages/search?repo=flagger)
|
||||
[](https://clomonitor.io/projects/cncf/flagger)
|
||||
|
||||
Flagger is a progressive delivery tool that automates the release process for applications running on Kubernetes.
|
||||
It reduces the risk of introducing a new software version in production
|
||||
@@ -13,54 +14,53 @@ by gradually shifting traffic to the new version while measuring metrics and run
|
||||

|
||||
|
||||
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
|
||||
using a service mesh (App Mesh, Istio, Linkerd, Open Service Mesh, Kuma)
|
||||
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
|
||||
For release analysis, Flagger can query Prometheus, Datadog, New Relic, CloudWatch, Dynatrace,
|
||||
InfluxDB and Stackdriver and for alerting it uses Slack, MS Teams, Discord, Rocket and Google Chat.
|
||||
and integrates with various Kubernetes ingress controllers, service mesh, and monitoring solutions.
|
||||
|
||||
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
|
||||
and part of [Flux](https://fluxcd.io) family of GitOps tools.
|
||||
and part of the [Flux](https://fluxcd.io) family of GitOps tools.
|
||||
|
||||
### Documentation
|
||||
|
||||
Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app).
|
||||
Flagger documentation can be found at [fluxcd.io/flagger](https://fluxcd.io/flagger/).
|
||||
|
||||
* Install
|
||||
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
|
||||
* [Flagger install on Kubernetes](https://fluxcd.io/flagger/install/flagger-install-on-kubernetes)
|
||||
* Usage
|
||||
* [How it works](https://docs.flagger.app/usage/how-it-works)
|
||||
* [Deployment strategies](https://docs.flagger.app/usage/deployment-strategies)
|
||||
* [Metrics analysis](https://docs.flagger.app/usage/metrics)
|
||||
* [Webhooks](https://docs.flagger.app/usage/webhooks)
|
||||
* [Alerting](https://docs.flagger.app/usage/alerting)
|
||||
* [Monitoring](https://docs.flagger.app/usage/monitoring)
|
||||
* [How it works](https://fluxcd.io/flagger/usage/how-it-works)
|
||||
* [Deployment strategies](https://fluxcd.io/flagger/usage/deployment-strategies)
|
||||
* [Metrics analysis](https://fluxcd.io/flagger/usage/metrics)
|
||||
* [Webhooks](https://fluxcd.io/flagger/usage/webhooks)
|
||||
* [Alerting](https://fluxcd.io/flagger/usage/alerting)
|
||||
* [Monitoring](https://fluxcd.io/flagger/usage/monitoring)
|
||||
* Tutorials
|
||||
* [App Mesh](https://docs.flagger.app/tutorials/appmesh-progressive-delivery)
|
||||
* [Istio](https://docs.flagger.app/tutorials/istio-progressive-delivery)
|
||||
* [Linkerd](https://docs.flagger.app/tutorials/linkerd-progressive-delivery)
|
||||
* [Open Service Mesh (OSM)](https://docs.flagger.app/tutorials/osm-progressive-delivery)
|
||||
* [Kuma Service Mesh](https://docs.flagger.app/tutorials/kuma-progressive-delivery)
|
||||
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
|
||||
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
|
||||
* [NGINX Ingress](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
|
||||
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
|
||||
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
|
||||
* [Kubernetes Blue/Green](https://docs.flagger.app/tutorials/kubernetes-blue-green)
|
||||
* [App Mesh](https://fluxcd.io/flagger/tutorials/appmesh-progressive-delivery)
|
||||
* [Istio](https://fluxcd.io/flagger/tutorials/istio-progressive-delivery)
|
||||
* [Linkerd](https://fluxcd.io/flagger/tutorials/linkerd-progressive-delivery)
|
||||
* [Open Service Mesh (OSM)](https://fluxcd.io/flagger/tutorials/osm-progressive-delivery)
|
||||
* [Kuma Service Mesh](https://fluxcd.io/flagger/tutorials/kuma-progressive-delivery)
|
||||
* [Contour](https://fluxcd.io/flagger/tutorials/contour-progressive-delivery)
|
||||
* [Gloo](https://fluxcd.io/flagger/tutorials/gloo-progressive-delivery)
|
||||
* [NGINX Ingress](https://fluxcd.io/flagger/tutorials/nginx-progressive-delivery)
|
||||
* [Skipper](https://fluxcd.io/flagger/tutorials/skipper-progressive-delivery)
|
||||
* [Traefik](https://fluxcd.io/flagger/tutorials/traefik-progressive-delivery)
|
||||
* [Gateway API](https://fluxcd.io/flagger/tutorials/gatewayapi-progressive-delivery/)
|
||||
* [Kubernetes Blue/Green](https://fluxcd.io/flagger/tutorials/kubernetes-blue-green)
|
||||
|
||||
### Who is using Flagger
|
||||
### Adopters
|
||||
|
||||
**Our list of production users has moved to <https://fluxcd.io/adopters/#flagger>**.
|
||||
|
||||
If you are using Flagger, please [submit a PR to add your organization](https://github.com/fluxcd/website/tree/main/adopters#readme) to the list!
|
||||
If you are using Flagger, please
|
||||
[submit a PR to add your organization](https://github.com/fluxcd/website/tree/main/adopters#readme) to the list!
|
||||
|
||||
### Canary CRD
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services, service mesh or ingress routes).
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services, service mesh, or ingress routes).
|
||||
These objects expose the application on the mesh and drive the canary analysis and promotion.
|
||||
|
||||
Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
|
||||
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.
|
||||
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronized.
|
||||
|
||||
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
|
||||
|
||||
@@ -85,7 +85,7 @@ spec:
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
apiVersion: autoscaling/v2beta2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -178,7 +178,7 @@ spec:
|
||||
name: on-call-msteams
|
||||
```
|
||||
|
||||
For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/usage/how-it-works).
|
||||
For more details on how the canary analysis and promotion works please [read the docs](https://fluxcd.io/flagger/usage/how-it-works).
|
||||
|
||||
### Features
|
||||
|
||||
@@ -198,16 +198,16 @@ For more details on how the canary analysis and promotion works please [read the
|
||||
|
||||
**Ingress**
|
||||
|
||||
| Feature | Contour | Gloo | NGINX | Skipper | Traefik |
|
||||
|-------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
|
||||
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
|
||||
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Feature | Contour | Gloo | NGINX | Skipper | Traefik | Apache APISIX |
|
||||
|-------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
|
||||
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
|
||||
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
|
||||
|
||||
**Networking Interface**
|
||||
|
||||
@@ -223,44 +223,58 @@ For more details on how the canary analysis and promotion works please [read the
|
||||
| Request duration check (L7 metric) | :heavy_minus_sign: | :heavy_minus_sign: |
|
||||
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: |
|
||||
|
||||
For all [Gateway API](https://gateway-api.sigs.k8s.io/) implementations like [Contour](https://projectcontour.io/guides/gateway-api/), [Istio](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/) and [SMI](https://smi-spec.io) compatible service mesh solutions like [Consul Connect](https://www.consul.io/docs/connect) or [Nginx Service Mesh](https://docs.nginx.com/nginx-service-mesh/), [Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus) can be used to implement the request success rate and request duration checks.
|
||||
For all [Gateway API](https://gateway-api.sigs.k8s.io/) implementations like
|
||||
[Contour](https://projectcontour.io/guides/gateway-api/) or
|
||||
[Istio](https://istio.io/latest/docs/tasks/traffic-management/ingress/gateway-api/)
|
||||
and [SMI](https://smi-spec.io) compatible service mesh solutions like
|
||||
[Nginx Service Mesh](https://docs.nginx.com/nginx-service-mesh/),
|
||||
[Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus)
|
||||
can be used to implement the request success rate and request duration checks.
|
||||
|
||||
### Roadmap
|
||||
|
||||
#### [GitOps Toolkit](https://github.com/fluxcd/flux2) compatibility
|
||||
|
||||
* Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
|
||||
* Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
|
||||
* Make Flagger emit Kubernetes events compatible with Flux v2 notification API
|
||||
* Integrate Flagger into Flux v2 as the progressive delivery component
|
||||
- Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
|
||||
- Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
|
||||
- Make Flagger emit Kubernetes events compatible with Flux v2 notification API
|
||||
- Integrate Flagger into Flux v2 as the progressive delivery component
|
||||
|
||||
#### Integrations
|
||||
|
||||
* Add support for ingress controllers like HAProxy and ALB
|
||||
* Add support for metrics providers like InfluxDB, Stackdriver, SignalFX
|
||||
- Add support for ingress controllers like HAProxy, ALB, and Apache APISIX
|
||||
- Add support for Knative Serving
|
||||
|
||||
### Contributing
|
||||
|
||||
Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
|
||||
To start contributing please read the [development guide](https://docs.flagger.app/dev/dev-guide).
|
||||
|
||||
When submitting bug reports please include as much details as possible:
|
||||
When submitting bug reports please include as many details as possible:
|
||||
|
||||
* which Flagger version
|
||||
* which Flagger CRD version
|
||||
* which Kubernetes version
|
||||
* what configuration (canary, ingress and workloads definitions)
|
||||
* what happened (Flagger and Proxy logs)
|
||||
- which Flagger version
|
||||
- which Kubernetes version
|
||||
- what configuration (canary, ingress and workloads definitions)
|
||||
- what happened (Flagger and Proxy logs)
|
||||
|
||||
### Getting Help
|
||||
### Communication
|
||||
|
||||
If you have any questions about Flagger and progressive delivery:
|
||||
Here is a list of good entry points into our community, how we stay in touch and how you can meet us as a team.
|
||||
|
||||
* Read the Flagger [docs](https://docs.flagger.app).
|
||||
* Invite yourself to the [CNCF community slack](https://slack.cncf.io/)
|
||||
and join the [#flagger](https://cloud-native.slack.com/messages/flagger/) channel.
|
||||
* Check out the **[Flux events calendar](https://fluxcd.io/#calendar)**, both with upcoming talks, events and meetings you can attend.
|
||||
* Or view the **[Flux resources section](https://fluxcd.io/resources)** with past events videos you can watch.
|
||||
* File an [issue](https://github.com/fluxcd/flagger/issues/new).
|
||||
- Slack: Join in and talk to us in the `#flagger` channel on [CNCF Slack](https://slack.cncf.io/).
|
||||
- Public meetings: We run weekly meetings - join one of the upcoming dev meetings from the [Flux calendar](https://fluxcd.io/#calendar).
|
||||
- Blog: Stay up to date with the latest news on [the Flux blog](https://fluxcd.io/blog/).
|
||||
- Mailing list: To be updated on Flux and Flagger progress regularly, please [join the flux-dev mailing list](https://lists.cncf.io/g/cncf-flux-dev).
|
||||
|
||||
Your feedback is always welcome!
|
||||
#### Subscribing to the flux-dev calendar
|
||||
|
||||
To add the meetings to your e.g. Google calendar
|
||||
|
||||
1. visit the [Flux calendar](https://lists.cncf.io/g/cncf-flux-dev/calendar)
|
||||
2. click on "Subscribe to Calendar" at the very bottom of the page
|
||||
3. copy the iCalendar URL
|
||||
4. open e.g. your Google calendar
|
||||
5. find the "add calendar" option
|
||||
6. choose "add by URL"
|
||||
7. paste iCalendar URL (ends with `.ics`)
|
||||
8. done
|
||||
|
||||
@@ -31,6 +31,18 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
@@ -78,6 +90,7 @@ rules:
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
- canaries/finalizers
|
||||
- metrictemplates
|
||||
- metrictemplates/status
|
||||
- alertproviders
|
||||
@@ -213,6 +226,31 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- keda.sh
|
||||
resources:
|
||||
- scaledobjects
|
||||
- scaledobjects/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apisix.apache.org
|
||||
resources:
|
||||
- apisixroutes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
|
||||
@@ -104,7 +104,7 @@ spec:
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
description: Scaler selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
@@ -114,8 +114,20 @@ spec:
|
||||
type: string
|
||||
enum:
|
||||
- HorizontalPodAutoscaler
|
||||
- ScaledObject
|
||||
name:
|
||||
type: string
|
||||
primaryScalerQueries:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
primaryScalerReplicas:
|
||||
type: object
|
||||
properties:
|
||||
minReplicas:
|
||||
type: number
|
||||
maxReplicas:
|
||||
type: number
|
||||
ingressRef:
|
||||
description: Ingress selector
|
||||
type: object
|
||||
@@ -129,6 +141,19 @@ spec:
|
||||
- Ingress
|
||||
name:
|
||||
type: string
|
||||
routeRef:
|
||||
description: APISIX route selector
|
||||
type: object
|
||||
required: [ "apiVersion", "kind", "name" ]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- ApisixRoute
|
||||
name:
|
||||
type: string
|
||||
upstreamRef:
|
||||
description: Gloo Upstream selector
|
||||
type: object
|
||||
@@ -158,6 +183,9 @@ spec:
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
appProtocol:
|
||||
description: Application protocol of the port
|
||||
type: string
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
x-kubernetes-int-or-string: true
|
||||
@@ -942,6 +970,11 @@ spec:
|
||||
namespace:
|
||||
description: Namespace of this metric template
|
||||
type: string
|
||||
templateVariables:
|
||||
description: Additional variables to be used in the metrics query (key-value pairs)
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
alerts:
|
||||
description: Alert list for this canary analysis
|
||||
type: array
|
||||
@@ -1012,6 +1045,18 @@ spec:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
sessionAffinity:
|
||||
description: SessionAffinity represents the session affinity settings for a canary run.
|
||||
type: object
|
||||
required: [ "cookieName" ]
|
||||
properties:
|
||||
cookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
maxAge:
|
||||
description: MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
default: 86400
|
||||
type: number
|
||||
status:
|
||||
description: CanaryStatus defines the observed state of a canary.
|
||||
type: object
|
||||
@@ -1032,27 +1077,36 @@ spec:
|
||||
- Failed
|
||||
- Terminating
|
||||
- Terminated
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
trackedConfigs:
|
||||
description: TrackedConfig of this canary
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastPromotedSpec:
|
||||
description: LastPromotedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
sessionAffinityCookie:
|
||||
description: Session affinity cookie of the current canary run
|
||||
type: string
|
||||
previousSessionAffinityCookie:
|
||||
description: Session affinity cookie of the previous canary run
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
|
||||
@@ -22,7 +22,7 @@ spec:
|
||||
serviceAccountName: flagger
|
||||
containers:
|
||||
- name: flagger
|
||||
image: ghcr.io/fluxcd/flagger:1.19.0
|
||||
image: ghcr.io/fluxcd/flagger:1.31.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v1
|
||||
name: flagger
|
||||
version: 1.19.0
|
||||
appVersion: 1.19.0
|
||||
kubeVersion: ">=1.16.0-0"
|
||||
version: 1.31.0
|
||||
appVersion: 1.31.0
|
||||
kubeVersion: ">=1.19.0-0"
|
||||
engine: gotpl
|
||||
description: Flagger is a progressive delivery operator for Kubernetes
|
||||
home: https://flagger.app
|
||||
@@ -18,11 +18,12 @@ keywords:
|
||||
- istio
|
||||
- appmesh
|
||||
- linkerd
|
||||
- kuma
|
||||
- osm
|
||||
- smi
|
||||
- gloo
|
||||
- contour
|
||||
- nginx
|
||||
- traefik
|
||||
- osm
|
||||
- smi
|
||||
- gitops
|
||||
- canary
|
||||
|
||||
@@ -1,22 +1,18 @@
|
||||
# Flagger
|
||||
|
||||
[Flagger](https://github.com/fluxcd/flagger) is an operator that automates the release process of applications on Kubernetes.
|
||||
[Flagger](https://github.com/fluxcd/flagger) is a progressive delivery tool that automates the release process
|
||||
for applications running on Kubernetes. It reduces the risk of introducing a new software version in production
|
||||
by gradually shifting traffic to the new version while measuring metrics and running conformance tests.
|
||||
|
||||
Flagger can run automated application analysis, testing, promotion and rollback for the following deployment strategies:
|
||||
* Canary Release (progressive traffic shifting)
|
||||
* A/B Testing (HTTP headers and cookies traffic routing)
|
||||
* Blue/Green (traffic switching and mirroring)
|
||||
|
||||
Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh, Open Service Mesh) and with Kubernetes ingress controllers
|
||||
(NGINX, Skipper, Gloo, Contour, Traefik).
|
||||
Flagger can be configured to send alerts to various chat platforms such as Slack, Microsoft Teams, Discord and Rocket.
|
||||
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
|
||||
and integrates with various Kubernetes ingress controllers, service mesh and monitoring solutions.
|
||||
|
||||
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
|
||||
and part of [Flux](https://fluxcd.io) family of GitOps tools.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Kubernetes >= 1.16
|
||||
* Kubernetes >= 1.19
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
@@ -44,10 +40,13 @@ $ helm upgrade -i flagger flagger/flagger \
|
||||
To install Flagger for **Linkerd** (requires Linkerd Viz extension):
|
||||
|
||||
```console
|
||||
# Note that linkerdAuthPolicy.create=true is only required for Linkerd 2.12 and
|
||||
# later
|
||||
$ helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=linkerd \
|
||||
--namespace=flagger-system \
|
||||
--set meshProvider=linkerd \
|
||||
--set metricsServer=http://prometheus.linkerd-viz:9090
|
||||
--set metricsServer=http://prometheus.linkerd-viz:9090 \
|
||||
--set linkerdAuthPolicy.create=true
|
||||
```
|
||||
|
||||
To install Flagger for **AWS App Mesh**:
|
||||
@@ -131,54 +130,57 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following tables lists the configurable parameters of the Flagger chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------|
|
||||
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger` |
|
||||
| `image.tag` | Image tag | `<VERSION>` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `logLevel` | Log level | `info` |
|
||||
| `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090` |
|
||||
| `prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the custer | `false` |
|
||||
| `prometheus.retention` | Prometheus data retention | `2h` |
|
||||
| `selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name` |
|
||||
| `configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true` |
|
||||
| `eventWebhook` | If set, Flagger will publish events to the given webhook | None |
|
||||
| `slack.url` | Slack incoming webhook | None |
|
||||
| `slack.proxyUrl` | Slack proxy url | None |
|
||||
| `slack.channel` | Slack channel | None |
|
||||
| `slack.user` | Slack username | `flagger` |
|
||||
| `msteams.url` | Microsoft Teams incoming webhook | None |
|
||||
| `msteams.proxyUrl` | Microsoft Teams proxy url | None |
|
||||
| `clusterName` | When specified, Flagger will add the cluster name to alerts | `""` |
|
||||
| `podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false` |
|
||||
| `podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace |
|
||||
| `podMonitor.interval` | Interval at which metrics should be scraped | `15s` |
|
||||
| `podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}` |
|
||||
| `leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false` |
|
||||
| `leaderElection.replicaCount` | Number of replicas | `1` |
|
||||
| `serviceAccount.create` | If `true`, Flagger will create service account | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` |
|
||||
| `serviceAccount.annotations` | Annotations for service account | `{}` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` |
|
||||
| `includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""` |
|
||||
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
|
||||
| `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` |
|
||||
| `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` |
|
||||
| `resources.requests/cpu` | Pod CPU request | `10m` |
|
||||
| `resources.requests/memory` | Pod memory request | `32Mi` |
|
||||
| `resources.limits/cpu` | Pod CPU limit | `1000m` |
|
||||
| `resources.limits/memory` | Pod memory limit | `512Mi` |
|
||||
| `affinity` | Node/pod affinities | None |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `threadiness` | Number of controller workers | `2` |
|
||||
| `tolerations` | List of node taints to tolerate | `[]` |
|
||||
| `istio.kubeconfig.secretName` | The name of the Kubernetes secret containing the Istio shared control plane kubeconfig | None |
|
||||
| `istio.kubeconfig.key` | The name of Kubernetes secret data key that contains the Istio control plane kubeconfig | `kubeconfig` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None |
|
||||
| `ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None |
|
||||
| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
|
||||
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
|
||||
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
|
||||
| Parameter | Description | Default |
|
||||
|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------|
|
||||
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger` |
|
||||
| `image.tag` | Image tag | `<VERSION>` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `logLevel` | Log level | `info` |
|
||||
| `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090` |
|
||||
| `prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the custer | `false` |
|
||||
| `prometheus.retention` | Prometheus data retention | `2h` |
|
||||
| `selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name` |
|
||||
| `configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true` |
|
||||
| `eventWebhook` | If set, Flagger will publish events to the given webhook | None |
|
||||
| `slack.url` | Slack incoming webhook | None |
|
||||
| `slack.proxyUrl` | Slack proxy url | None |
|
||||
| `slack.channel` | Slack channel | None |
|
||||
| `slack.user` | Slack username | `flagger` |
|
||||
| `msteams.url` | Microsoft Teams incoming webhook | None |
|
||||
| `msteams.proxyUrl` | Microsoft Teams proxy url | None |
|
||||
| `clusterName` | When specified, Flagger will add the cluster name to alerts | `""` |
|
||||
| `podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false` |
|
||||
| `podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace |
|
||||
| `podMonitor.interval` | Interval at which metrics should be scraped | `15s` |
|
||||
| `podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}` |
|
||||
| `leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false` |
|
||||
| `leaderElection.replicaCount` | Number of replicas | `1` |
|
||||
| `serviceAccount.create` | If `true`, Flagger will create service account | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` |
|
||||
| `serviceAccount.annotations` | Annotations for service account | `{}` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` |
|
||||
| `includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""` |
|
||||
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
|
||||
| `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` |
|
||||
| `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` |
|
||||
| `resources.requests/cpu` | Pod CPU request | `10m` |
|
||||
| `resources.requests/memory` | Pod memory request | `32Mi` |
|
||||
| `resources.limits/cpu` | Pod CPU limit | `1000m` |
|
||||
| `resources.limits/memory` | Pod memory limit | `512Mi` |
|
||||
| `affinity` | Node/pod affinities | prefer spread across hosts |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `threadiness` | Number of controller workers | `2` |
|
||||
| `tolerations` | List of node taints to tolerate | `[]` |
|
||||
| `controlplane.kubeconfig.secretName` | The name of the Kubernetes secret containing the service mesh control plane kubeconfig | None |
|
||||
| `controlplane.kubeconfig.key` | The name of Kubernetes secret data key that contains the service mesh control plane kubeconfig | `kubeconfig` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None |
|
||||
| `ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None |
|
||||
| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
|
||||
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
|
||||
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
|
||||
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
|
||||
| `noCrossNamespaceRefs` | If `true`, cross namespace references to custom resources will be disabled | `false` |
|
||||
| `namespace` | When specified, Flagger will restrict itself to watching Canary objects from that namespace | `""` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
|
||||
|
||||
|
||||
@@ -104,7 +104,7 @@ spec:
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
description: Scaler selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
@@ -114,8 +114,20 @@ spec:
|
||||
type: string
|
||||
enum:
|
||||
- HorizontalPodAutoscaler
|
||||
- ScaledObject
|
||||
name:
|
||||
type: string
|
||||
primaryScalerQueries:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
primaryScalerReplicas:
|
||||
type: object
|
||||
properties:
|
||||
minReplicas:
|
||||
type: number
|
||||
maxReplicas:
|
||||
type: number
|
||||
ingressRef:
|
||||
description: Ingress selector
|
||||
type: object
|
||||
@@ -129,6 +141,19 @@ spec:
|
||||
- Ingress
|
||||
name:
|
||||
type: string
|
||||
routeRef:
|
||||
description: APISIX route selector
|
||||
type: object
|
||||
required: [ "apiVersion", "kind", "name" ]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- ApisixRoute
|
||||
name:
|
||||
type: string
|
||||
upstreamRef:
|
||||
description: Gloo Upstream selector
|
||||
type: object
|
||||
@@ -158,6 +183,9 @@ spec:
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
appProtocol:
|
||||
description: Application protocol of the port
|
||||
type: string
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
x-kubernetes-int-or-string: true
|
||||
@@ -942,6 +970,11 @@ spec:
|
||||
namespace:
|
||||
description: Namespace of this metric template
|
||||
type: string
|
||||
templateVariables:
|
||||
description: Additional variables to be used in the metrics query (key-value pairs)
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
alerts:
|
||||
description: Alert list for this canary analysis
|
||||
type: array
|
||||
@@ -1012,6 +1045,18 @@ spec:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
sessionAffinity:
|
||||
description: SessionAffinity represents the session affinity settings for a canary run.
|
||||
type: object
|
||||
required: [ "cookieName" ]
|
||||
properties:
|
||||
cookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
maxAge:
|
||||
description: MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
default: 86400
|
||||
type: number
|
||||
status:
|
||||
description: CanaryStatus defines the observed state of a canary.
|
||||
type: object
|
||||
@@ -1032,27 +1077,36 @@ spec:
|
||||
- Failed
|
||||
- Terminating
|
||||
- Terminated
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
trackedConfigs:
|
||||
description: TrackedConfig of this canary
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastPromotedSpec:
|
||||
description: LastPromotedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
sessionAffinityCookie:
|
||||
description: Session affinity cookie of the current canary run
|
||||
type: string
|
||||
previousSessionAffinityCookie:
|
||||
description: Session affinity cookie of the previous canary run
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
|
||||
@@ -3,6 +3,7 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
|
||||
|
||||
16
charts/flagger/templates/authorizationpolicy.yaml
Normal file
16
charts/flagger/templates/authorizationpolicy.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
{{- if .Values.linkerdAuthPolicy.create }}
|
||||
apiVersion: policy.linkerd.io/v1alpha1
|
||||
kind: AuthorizationPolicy
|
||||
metadata:
|
||||
namespace: {{ .Values.linkerdAuthPolicy.namespace }}
|
||||
name: prometheus-admin-flagger
|
||||
spec:
|
||||
targetRef:
|
||||
group: policy.linkerd.io
|
||||
kind: Server
|
||||
name: prometheus-admin
|
||||
requiredAuthenticationRefs:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -2,11 +2,17 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "flagger.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.leaderElection.replicaCount }}
|
||||
{{- if eq .Values.leaderElection.enabled false }}
|
||||
@@ -22,6 +28,7 @@ spec:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- range $key, $value := .Values.podLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
@@ -33,25 +40,22 @@ spec:
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
|
||||
{{- if .Values.affinity }}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
topologyKey: kubernetes.io/hostname
|
||||
{{- tpl (toYaml .Values.affinity) . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
- name: kubeconfig
|
||||
secret:
|
||||
secretName: "{{ .Values.istio.kubeconfig.secretName }}"
|
||||
secretName: "{{ .Values.controlplane.kubeconfig.secretName }}"
|
||||
{{- end }}
|
||||
{{- if .Values.additionalVolumes }}
|
||||
{{- toYaml .Values.additionalVolumes | nindent 8 -}}
|
||||
{{- end }}
|
||||
{{- if .Values.podPriorityClassName }}
|
||||
priorityClassName: {{ .Values.podPriorityClassName }}
|
||||
@@ -63,9 +67,9 @@ spec:
|
||||
{{ toYaml .Values.securityContext.context | indent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
- name: kubeconfig
|
||||
mountPath: "/tmp/istio-host"
|
||||
mountPath: "/tmp/controlplane"
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
@@ -132,8 +136,8 @@ spec:
|
||||
{{- if .Values.kubeconfigBurst }}
|
||||
- -kubeconfig-burst={{ .Values.kubeconfigBurst }}
|
||||
{{- end }}
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
- -kubeconfig-service-mesh=/tmp/istio-host/{{ .Values.istio.kubeconfig.key }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
- -kubeconfig-service-mesh=/tmp/controlplane/{{ .Values.controlplane.kubeconfig.key }}
|
||||
{{- end }}
|
||||
{{- if .Values.threadiness }}
|
||||
- -threadiness={{ .Values.threadiness }}
|
||||
@@ -141,6 +145,9 @@ spec:
|
||||
{{- if .Values.clusterName }}
|
||||
- -cluster-name={{ .Values.clusterName }}
|
||||
{{- end }}
|
||||
{{- if .Values.noCrossNamespaceRefs }}
|
||||
- -no-cross-namespace-refs={{ .Values.noCrossNamespaceRefs }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
|
||||
@@ -3,6 +3,7 @@ apiVersion: policy/v1beta1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ template "flagger.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
|
||||
selector:
|
||||
|
||||
@@ -50,6 +50,7 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
|
||||
@@ -27,6 +27,18 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
@@ -74,6 +86,7 @@ rules:
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
- canaries/finalizers
|
||||
- metrictemplates
|
||||
- metrictemplates/status
|
||||
- alertproviders
|
||||
@@ -221,6 +234,31 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- keda.sh
|
||||
resources:
|
||||
- scaledobjects
|
||||
- scaledobjects/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apisix.apache.org
|
||||
resources:
|
||||
- apisixroutes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
# Default values for flagger.
|
||||
|
||||
## Deployment annotations
|
||||
# annotations: {}
|
||||
|
||||
image:
|
||||
repository: ghcr.io/fluxcd/flagger
|
||||
tag: 1.19.0
|
||||
tag: 1.31.0
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret:
|
||||
|
||||
@@ -13,13 +16,14 @@ podAnnotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
|
||||
linkerd.io/inject: enabled
|
||||
|
||||
# priority class name for pod priority configuration
|
||||
podPriorityClassName: ""
|
||||
|
||||
metricsServer: "http://prometheus:9090"
|
||||
|
||||
# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik, osm
|
||||
# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik, apisix, osm
|
||||
meshProvider: ""
|
||||
|
||||
# single namespace restriction
|
||||
@@ -120,6 +124,13 @@ crd:
|
||||
# crd.create: `true` if custom resource definitions should be created
|
||||
create: false
|
||||
|
||||
linkerdAuthPolicy:
|
||||
# linkerdAuthPolicy.create: Whether to create an AuthorizationPolicy in
|
||||
# linkerd viz' namespace to allow flagger to reach viz' prometheus service
|
||||
create: false
|
||||
# linkerdAuthPolicy.namespace: linkerd-viz' namespace
|
||||
namespace: linkerd-viz
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
@@ -135,10 +146,21 @@ nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: '{{ template "flagger.name" . }}'
|
||||
app.kubernetes.io/instance: '{{ .Release.Name }}'
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
prometheus:
|
||||
# to be used with ingress controllers
|
||||
install: false
|
||||
image: docker.io/prom/prometheus:v2.33.5
|
||||
image: docker.io/prom/prometheus:v2.41.0
|
||||
pullSecret:
|
||||
retention: 2h
|
||||
# when enabled, it will add a security context for the prometheus pod
|
||||
@@ -151,13 +173,12 @@ prometheus:
|
||||
kubeconfigQPS: ""
|
||||
kubeconfigBurst: ""
|
||||
|
||||
# Istio multi-cluster service mesh (shared control plane single-network)
|
||||
# https://istio.io/docs/setup/install/multicluster/shared-vpn/
|
||||
istio:
|
||||
# Multi-cluster service mesh (shared control plane single-network)
|
||||
controlplane:
|
||||
kubeconfig:
|
||||
# istio.kubeconfig.secretName: The name of the secret containing the Istio control plane kubeconfig
|
||||
# controlplane.kubeconfig.secretName: The name of the secret containing the mesh control plane kubeconfig
|
||||
secretName: ""
|
||||
# istio.kubeconfig.key: The name of secret data key that contains the Istio control plane kubeconfig
|
||||
# controlplane.kubeconfig.key: The name of secret data key that contains the mesh control plane kubeconfig
|
||||
key: "kubeconfig"
|
||||
|
||||
podDisruptionBudget:
|
||||
@@ -165,3 +186,10 @@ podDisruptionBudget:
|
||||
minAvailable: 1
|
||||
|
||||
podLabels: {}
|
||||
|
||||
noCrossNamespaceRefs: false
|
||||
|
||||
#Placeholder to supply additional volumes to the flagger pod
|
||||
additionalVolumes: {}
|
||||
# - name: tmpfs
|
||||
# emptyDir: {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v1
|
||||
name: grafana
|
||||
version: 1.6.0
|
||||
version: 1.7.0
|
||||
appVersion: 7.2.0
|
||||
description: Grafana dashboards for monitoring Flagger canary deployments
|
||||
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
|
||||
|
||||
@@ -403,7 +403,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -411,7 +411,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -419,7 +419,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -509,7 +509,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -517,7 +517,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -525,7 +525,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v1
|
||||
name: loadtester
|
||||
version: 0.22.0
|
||||
appVersion: 0.22.0
|
||||
kubeVersion: ">=1.11.0-0"
|
||||
version: 0.28.1
|
||||
appVersion: 0.28.1
|
||||
kubeVersion: ">=1.19.0-0"
|
||||
engine: gotpl
|
||||
description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
|
||||
home: https://docs.flagger.app
|
||||
|
||||
@@ -7,7 +7,7 @@ It can be used to generate HTTP and gRPC traffic during canary analysis when con
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Kubernetes >= 1.11
|
||||
* Kubernetes >= 1.19
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
@@ -44,35 +44,35 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following tables lists the configurable parameters of the load tester chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`image.tag` | Image tag | `<VERSION>`
|
||||
`replicaCount` | Desired number of pods | `1`
|
||||
`serviceAccountName` | Kubernetes service account name | `none`
|
||||
`resources.requests.cpu` | CPU requests | `10m`
|
||||
`resources.requests.memory` | Memory requests | `64Mi`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
`affinity` | node/pod affinities | `node`
|
||||
`nodeSelector` | Node labels for pod assignment | `{}`
|
||||
`service.type` | Type of service | `ClusterIP`
|
||||
`service.port` | ClusterIP port | `80`
|
||||
`cmd.timeout` | Command execution timeout | `1h`
|
||||
`cmd.namespaceRegexp` | Restrict access to canaries in matching namespaces | ""
|
||||
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
|
||||
`appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false`
|
||||
`appmesh.backends` | AWS App Mesh virtual services | `none`
|
||||
`istio.enabled` | Create Istio virtual service | `false`
|
||||
`istio.host` | Loadtester hostname | `flagger-loadtester.flagger`
|
||||
`istio.gateway.enabled` | Create Istio gateway in namespace | `false`
|
||||
`istio.tls.enabled` | Enable TLS in gateway ( TLS secrets should be in namespace ) | `false`
|
||||
`istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false`
|
||||
`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
|
||||
`securityContext.enabled` | Add securityContext to container | ""
|
||||
`securityContext.context` | securityContext to add | ""
|
||||
`podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false`
|
||||
`podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1`
|
||||
| Parameter | Description | Default |
|
||||
|------------------------------------|--------------------------------------------------------------------------------------|-------------------------------------|
|
||||
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger-loadtester` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.tag` | Image tag | `<VERSION>` |
|
||||
| `replicaCount` | Desired number of pods | `1` |
|
||||
| `serviceAccountName` | Kubernetes service account name | `none` |
|
||||
| `resources.requests.cpu` | CPU requests | `10m` |
|
||||
| `resources.requests.memory` | Memory requests | `64Mi` |
|
||||
| `tolerations` | List of node taints to tolerate | `[]` |
|
||||
| `affinity` | node/pod affinities | `node` |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `service.type` | Type of service | `ClusterIP` |
|
||||
| `service.port` | ClusterIP port | `80` |
|
||||
| `cmd.timeout` | Command execution timeout | `1h` |
|
||||
| `cmd.namespaceRegexp` | Restrict access to canaries in matching namespaces | "" |
|
||||
| `logLevel` | Log level can be debug, info, warning, error or panic | `info` |
|
||||
| `appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false` |
|
||||
| `appmesh.backends` | AWS App Mesh virtual services | `none` |
|
||||
| `istio.enabled` | Create Istio virtual service | `false` |
|
||||
| `istio.host` | Loadtester hostname | `flagger-loadtester.flagger` |
|
||||
| `istio.gateway.enabled` | Create Istio gateway in namespace | `false` |
|
||||
| `istio.tls.enabled` | Enable TLS in gateway ( TLS secrets should be in namespace ) | `false` |
|
||||
| `istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false` |
|
||||
| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
|
||||
| `securityContext.enabled` | Add securityContext to container | "" |
|
||||
| `securityContext.context` | securityContext to add | "" |
|
||||
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
|
||||
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: ghcr.io/fluxcd/flagger-loadtester
|
||||
tag: 0.22.0
|
||||
tag: 0.28.1
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret:
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v1
|
||||
version: 6.0.0
|
||||
appVersion: 6.0.0
|
||||
version: 6.1.3
|
||||
appVersion: 6.1.3
|
||||
name: podinfo
|
||||
engine: gotpl
|
||||
description: Flagger canary deployment demo application
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if not .Values.canary.enabled }}
|
||||
{{- if and .Values.service.enabled (not .Values.canary.enabled) }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Default values for podinfo.
|
||||
image:
|
||||
repository: ghcr.io/stefanprodan/podinfo
|
||||
tag: 6.0.0
|
||||
tag: 6.1.3
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
@@ -66,6 +66,7 @@ var (
|
||||
msteamsProxyURL string
|
||||
includeLabelPrefix string
|
||||
slackURL string
|
||||
slackToken string
|
||||
slackProxyURL string
|
||||
slackUser string
|
||||
slackChannel string
|
||||
@@ -84,6 +85,7 @@ var (
|
||||
ver bool
|
||||
kubeconfigServiceMesh string
|
||||
clusterName string
|
||||
noCrossNamespaceRefs bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -96,6 +98,7 @@ func init() {
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "8080", "Port to listen on.")
|
||||
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
|
||||
flag.StringVar(&slackToken, "slack-token", "", "Slack bot token.")
|
||||
flag.StringVar(&slackProxyURL, "slack-proxy-url", "", "Slack proxy URL.")
|
||||
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
|
||||
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
|
||||
@@ -107,7 +110,7 @@ func init() {
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper, traefik, osm or kuma.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper, traefik, apisix, osm or kuma.")
|
||||
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
|
||||
flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for NGINX ingresses.")
|
||||
flag.StringVar(&ingressClass, "ingress-class", "", "Ingress class used for annotating HTTPProxy objects.")
|
||||
@@ -117,6 +120,7 @@ func init() {
|
||||
flag.BoolVar(&ver, "version", false, "Print version")
|
||||
flag.StringVar(&kubeconfigServiceMesh, "kubeconfig-service-mesh", "", "Path to a kubeconfig for the service mesh control plane cluster.")
|
||||
flag.StringVar(&clusterName, "cluster-name", "", "Cluster name to be included in alert msgs.")
|
||||
flag.BoolVar(&noCrossNamespaceRefs, "no-cross-namespace-refs", false, "When set to true, Flagger can only refer to resources in the same namespace.")
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -166,15 +170,15 @@ func main() {
|
||||
if kubeconfigServiceMesh == "" {
|
||||
kubeconfigServiceMesh = kubeconfig
|
||||
}
|
||||
cfgHost, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigServiceMesh)
|
||||
serviceMeshCfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigServiceMesh)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building host kubeconfig: %v", err)
|
||||
}
|
||||
|
||||
cfgHost.QPS = float32(kubeconfigQPS)
|
||||
cfgHost.Burst = kubeconfigBurst
|
||||
serviceMeshCfg.QPS = float32(kubeconfigQPS)
|
||||
serviceMeshCfg.Burst = kubeconfigBurst
|
||||
|
||||
meshClient, err := clientset.NewForConfig(cfgHost)
|
||||
meshClient, err := clientset.NewForConfig(serviceMeshCfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building mesh clientset: %v", err)
|
||||
}
|
||||
@@ -210,7 +214,14 @@ func main() {
|
||||
// start HTTP server
|
||||
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
|
||||
|
||||
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, ingressClass, logger, meshClient)
|
||||
setOwnerRefs := true
|
||||
// Router shouldn't set OwnerRefs on resources that they create since the
|
||||
// service mesh/ingress controller is in a different cluster.
|
||||
if cfg.Host != serviceMeshCfg.Host {
|
||||
setOwnerRefs = false
|
||||
}
|
||||
|
||||
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, ingressClass, logger, meshClient, setOwnerRefs)
|
||||
|
||||
var configTracker canary.Tracker
|
||||
if enableConfigTracking {
|
||||
@@ -241,6 +252,7 @@ func main() {
|
||||
version.VERSION,
|
||||
fromEnv("EVENT_WEBHOOK_URL", eventWebhook),
|
||||
clusterName,
|
||||
noCrossNamespaceRefs,
|
||||
)
|
||||
|
||||
// leader election context
|
||||
@@ -315,7 +327,7 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient
|
||||
id = id + "_" + string(uuid.NewUUID())
|
||||
|
||||
lock, err := resourcelock.New(
|
||||
resourcelock.ConfigMapsResourceLock,
|
||||
resourcelock.ConfigMapsLeasesResourceLock,
|
||||
ns,
|
||||
configMapName,
|
||||
kubeClient.CoreV1(),
|
||||
@@ -355,6 +367,7 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient
|
||||
|
||||
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
provider := "slack"
|
||||
token := fromEnv("SLACK_TOKEN", slackToken)
|
||||
notifierURL := fromEnv("SLACK_URL", slackURL)
|
||||
notifierProxyURL := fromEnv("SLACK_PROXY_URL", slackProxyURL)
|
||||
if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
|
||||
@@ -362,7 +375,7 @@ func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
|
||||
notifierProxyURL = fromEnv("MSTEAMS_PROXY_URL", msteamsProxyURL)
|
||||
}
|
||||
notifierFactory := notifier.NewFactory(notifierURL, notifierProxyURL, slackUser, slackChannel)
|
||||
notifierFactory := notifier.NewFactory(notifierURL, token, notifierProxyURL, slackUser, slackChannel)
|
||||
|
||||
var err error
|
||||
client, err = notifierFactory.Notifier(provider)
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var VERSION = "0.22.0"
|
||||
var VERSION = "0.28.1"
|
||||
var (
|
||||
logLevel string
|
||||
port string
|
||||
|
||||
BIN
docs/diagrams/flagger-apisix-overview.png
Normal file
BIN
docs/diagrams/flagger-apisix-overview.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 7.4 MiB |
Binary file not shown.
|
Before Width: | Height: | Size: 51 KiB After Width: | Height: | Size: 56 KiB |
@@ -10,16 +10,17 @@ version in production by gradually shifting traffic to the new version while mea
|
||||
and running conformance tests.
|
||||
|
||||
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
|
||||
using a service mesh (App Mesh, Istio, Linkerd, Open Service Mesh)
|
||||
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
|
||||
For release analysis, Flagger can query Prometheus, Datadog, New Relic, CloudWatch or Graphite
|
||||
and for alerting it uses Slack, MS Teams, Discord and Rocket.
|
||||
using a service mesh (App Mesh, Istio, Linkerd, Kuma, Open Service Mesh)
|
||||
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik, APISIX) for traffic routing.
|
||||
For release analysis, Flagger can query Prometheus, InfluxDB, Datadog, New Relic, CloudWatch, Stackdriver
|
||||
or Graphite and for alerting it uses Slack, MS Teams, Discord and Rocket.
|
||||
|
||||

|
||||
|
||||
Flagger can be configured with Kubernetes custom resources and is compatible with
|
||||
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
|
||||
it can be used in **GitOps** pipelines together with tools like Flux, JenkinsX, Carvel, Argo, etc.
|
||||
it can be used in **GitOps** pipelines together with tools like [Flux](install/flagger-install-with-flux.md),
|
||||
JenkinsX, Carvel, Argo, etc.
|
||||
|
||||
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
|
||||
and part of [Flux](https://fluxcd.io) family of GitOps tools.
|
||||
@@ -36,6 +37,7 @@ After installing Flagger, you can follow one of these tutorials to get started:
|
||||
* [Istio](tutorials/istio-progressive-delivery.md)
|
||||
* [Linkerd](tutorials/linkerd-progressive-delivery.md)
|
||||
* [AWS App Mesh](tutorials/appmesh-progressive-delivery.md)
|
||||
* [AWS App Mesh: Canary Deployment Using Flagger](https://www.eksworkshop.com/advanced/340_appmesh_flagger/)
|
||||
* [Open Service Mesh](tutorials/osm-progressive-delivery.md)
|
||||
* [Kuma](tutorials/kuma-progressive-delivery.md)
|
||||
|
||||
@@ -46,6 +48,7 @@ After installing Flagger, you can follow one of these tutorials to get started:
|
||||
* [NGINX Ingress](tutorials/nginx-progressive-delivery.md)
|
||||
* [Skipper Ingress](tutorials/skipper-progressive-delivery.md)
|
||||
* [Traefik](tutorials/traefik-progressive-delivery.md)
|
||||
* [Apache APISIX](tutorials/apisix-progressive-delivery.md)
|
||||
|
||||
**Hands-on GitOps workshops**
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
## Install
|
||||
|
||||
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
|
||||
* [Flagger Install with Flux](install/flagger-install-with-flux.md)
|
||||
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
|
||||
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
|
||||
* [Flagger Install on Alibaba ServiceMesh](install/flagger-install-on-alibaba-servicemesh.md)
|
||||
@@ -30,11 +31,13 @@
|
||||
* [NGINX Canary Deployments](tutorials/nginx-progressive-delivery.md)
|
||||
* [Skipper Canary Deployments](tutorials/skipper-progressive-delivery.md)
|
||||
* [Traefik Canary Deployments](tutorials/traefik-progressive-delivery.md)
|
||||
* [Apache APISIX Canary Deployments](tutorials/apisix-progressive-delivery.md)
|
||||
* [Open Service Mesh Deployments](tutorials/osm-progressive-delivery.md)
|
||||
* [Kuma Canary Deployments](tutorials/kuma-progressive-delivery.md)
|
||||
* [Gateway API Canary Deployments](tutorials/gatewayapi-progressive-delivery.md)
|
||||
* [Blue/Green Deployments](tutorials/kubernetes-blue-green.md)
|
||||
* [Canary analysis with Prometheus Operator](tutorials/prometheus-operator.md)
|
||||
* [Canary analysis with KEDA ScaledObjects](tutorials/keda-scaledobject.md)
|
||||
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)
|
||||
|
||||
## Dev
|
||||
|
||||
@@ -8,7 +8,7 @@ Flagger is written in Go and uses Go modules for dependency management.
|
||||
|
||||
On your dev machine install the following tools:
|
||||
|
||||
* go >= 1.17
|
||||
* go >= 1.19
|
||||
* git >;= 2.20
|
||||
* bash >= 5.0
|
||||
* make >= 3.81
|
||||
|
||||
@@ -4,13 +4,28 @@ This document describes how to release Flagger.
|
||||
|
||||
## Release
|
||||
|
||||
### Flagger
|
||||
|
||||
To release a new Flagger version (e.g. `2.0.0`) follow these steps:
|
||||
|
||||
* create a branch `git checkout -b prep-2.0.0`
|
||||
* create a branch `git checkout -b release-2.0.0`
|
||||
* set the version in code and manifests `TAG=2.0.0 make version-set`
|
||||
* commit changes and merge PR
|
||||
* checkout master `git checkout main && git pull`
|
||||
* tag master `make release`
|
||||
* checkout main `git checkout main && git pull`
|
||||
* tag main `make release`
|
||||
|
||||
### Flagger load tester
|
||||
|
||||
To release a new Flagger load tester version (e.g. `2.0.0`) follow these steps:
|
||||
|
||||
* create a branch `git checkout -b release-ld-2.0.0`
|
||||
* set the version in code (`cmd/loadtester/main.go#VERSION`)
|
||||
* set the version in the Helm chart (`charts/loadtester/Chart.yaml` and `values.yaml`)
|
||||
* set the version in manifests (`kustomize/tester/deployment.yaml`)
|
||||
* commit changes and push the branch upstream
|
||||
* in GitHub UI, navigate to Actions and run the `push-ld` workflow selecting the release branch
|
||||
* after the workflow finishes, open the PR which will run the e2e tests using the new tester version
|
||||
* merge the PR if the tests pass
|
||||
|
||||
## CI
|
||||
|
||||
@@ -18,7 +33,9 @@ After the tag has been pushed to GitHub, the CI release pipeline does the follow
|
||||
|
||||
* creates a GitHub release
|
||||
* pushes the Flagger binary and change log to GitHub release
|
||||
* pushes the Flagger container image to Docker Hub
|
||||
* pushes the Flagger container image to GitHub Container Registry
|
||||
* pushed the Flagger install manifests to GitHub Container Registry
|
||||
* signs all OCI artifacts and release assets with Cosign and GitHub OIDC
|
||||
* pushes the Helm chart to github-pages branch
|
||||
* GitHub pages publishes the new chart version on the Helm repository
|
||||
|
||||
@@ -32,3 +49,6 @@ After a Flagger release, publish the docs with:
|
||||
* `git checkout docs`
|
||||
* `git rebase main`
|
||||
* `git push origin docs`
|
||||
|
||||
Lastly open a PR with all the docs changes on [fluxcd/website](https://github.com/fluxcd/website) to
|
||||
update [fluxcd.io/flagger](https://fluxcd.io/flagger/).
|
||||
|
||||
@@ -69,8 +69,19 @@ spec:
|
||||
#### Why is there a window of downtime during the canary initializing process when analysis is disabled?
|
||||
|
||||
A window of downtime is the intended behavior when the analysis is disabled. This allows instant rollback and also mimics the way
|
||||
a Kubernetes deployment initialization works. To avoid this, enable the analysis (`skipAnalysis: true`), wait for the initialization
|
||||
to finish, and disable it afterward (`skipAnalysis: false`).
|
||||
a Kubernetes deployment initialization works. To avoid this, enable the analysis (`skipAnalysis: false`), wait for the initialization
|
||||
to finish, and disable it afterward (`skipAnalysis: true`).
|
||||
|
||||
#### How to disable cross namespace references?
|
||||
|
||||
Flagger by default can access resources across namespaces (`AlertProivder`, `MetricProvider` and Gloo `Upsteream`).
|
||||
If you're in a multi-tenant environment and wish to disable this, you can do so through the `no-cross-namespace-refs` flag.
|
||||
|
||||
```
|
||||
flagger \
|
||||
-no-cross-namespace-refs=true \
|
||||
...
|
||||
```
|
||||
|
||||
## Kubernetes services
|
||||
|
||||
|
||||
@@ -4,106 +4,19 @@ This guide walks you through setting up Flagger on Alibaba ServiceMesh.
|
||||
|
||||
## Prerequisites
|
||||
- Created an ACK([Alibabacloud Container Service for Kubernetes](https://cs.console.aliyun.com)) cluster instance.
|
||||
- Created an ASM([Alibaba ServiceMesh](https://servicemesh.console.aliyun.com)) instance, and added ACK cluster.
|
||||
- Create an ASM([Alibaba ServiceMesh](https://servicemesh.console.aliyun.com)) enterprise instance and add ACK cluster.
|
||||
|
||||
### Variables declaration
|
||||
- `$ACK_CONFIG`: the kubeconfig file path of ACK, which be treated as`$HOME/.kube/config` in the rest of guide.
|
||||
- `$MESH_CONFIG`: the kubeconfig file path of ASM.
|
||||
- `$ISTIO_RELEASE`: see https://github.com/istio/istio/releases
|
||||
- `$FLAGGER_SRC`: see https://github.com/fluxcd/flagger
|
||||
|
||||
## Install Prometheus
|
||||
Install Prometheus:
|
||||
### Enable Data-plane KubeAPI access in ASM
|
||||
|
||||
```bash
|
||||
kubectl apply -f $ISTIO_RELEASE/samples/addons/prometheus.yaml
|
||||
```
|
||||
In the Alibaba Cloud Service Mesh (ASM) console, on the basic information page, make sure Data-plane KubeAPI access is enabled. When enabled, the Istio resources of the control plane can be managed through the Kubeconfig of the data plane cluster.
|
||||
|
||||
it' same with the below cmd:
|
||||
## Enable Prometheus
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig "$ACK_CONFIG" apply -f $ISTIO_RELEASE/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
Append the below configs to `scrape_configs` in prometheus configmap, to support telemetry:
|
||||
```yaml
|
||||
scrape_configs:
|
||||
# Mixer scrapping. Defaults to Prometheus and mixer on same namespace.
|
||||
- job_name: 'istio-mesh'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;prometheus
|
||||
# Scrape config for envoy stats
|
||||
- job_name: 'envoy-stats'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_port_name]
|
||||
action: keep
|
||||
regex: '.*-envoy-prom'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:15090
|
||||
target_label: __address__
|
||||
- action: labeldrop
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
- job_name: 'istio-policy'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-policy;http-policy-monitoring
|
||||
- job_name: 'istio-telemetry'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;http-monitoring
|
||||
- job_name: 'pilot'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istiod;http-monitoring
|
||||
- source_labels: [__meta_kubernetes_service_label_app]
|
||||
target_label: app
|
||||
- job_name: 'sidecar-injector'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-sidecar-injector;http-monitoring
|
||||
```
|
||||
In the Alibaba Cloud Service Mesh (ASM) console, click Settings to enable the collection of Prometheus monitoring metrics. You can use the self-built Prometheus monitoring, or you can use the Alibaba Cloud ARMS Prometheus monitoring plug-in that has joined the ACK cluster, and use ARMS Prometheus to collect monitoring indicators.
|
||||
|
||||
## Install Flagger
|
||||
|
||||
@@ -111,26 +24,34 @@ Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
helm repo update
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f $FLAGGER_SRC/artifacts/flagger/crd.yaml
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/v1.21.0/artifacts/flagger/crd.yaml
|
||||
```
|
||||
## Deploy Flagger for Istio
|
||||
|
||||
Deploy Flagger for Alibaba ServiceMesh:
|
||||
### Add data plane cluster to Alibaba Cloud Service Mesh (ASM)
|
||||
|
||||
In the Alibaba Cloud Service Mesh (ASM) console, click Cluster & Workload Management, select the Kubernetes cluster, select the target ACK cluster, and add it to ASM.
|
||||
|
||||
### Prometheus address
|
||||
|
||||
If you are using Alibaba Cloud Container Service for Kubernetes (ACK) ARMS Prometheus monitoring, replace {Region-ID} in the link below with your region ID, such as cn-hangzhou. {ACKID} is the ACK ID of the data plane cluster that you added to Alibaba Cloud Service Mesh (ASM). Visit the following links to query the public and intranet addresses monitored by ACK's ARMS Prometheus:
|
||||
[https://arms.console.aliyun.com/#/promDetail/{Region-ID}/{ACK-ID}/setting](https://arms.console.aliyun.com/)
|
||||
|
||||
An example of an intranet address is as follows:
|
||||
[http://{Region-ID}-intranet.arms.aliyuncs.com:9090/api/v1/prometheus/{Prometheus-ID}/{u-id}/{ACK-ID}/{Region-ID}](https://arms.console.aliyun.com/)
|
||||
|
||||
## Deploy Flagger
|
||||
Replace the value of metricsServer with your Prometheus address.
|
||||
|
||||
```bash
|
||||
cp $MESH_CONFIG kubeconfig
|
||||
kubectl -n istio-system create secret generic istio-kubeconfig --from-file kubeconfig
|
||||
kubectl -n istio-system label secret istio-kubeconfig istio/multiCluster=true
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://prometheus:9090 \
|
||||
--set istio.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set istio.kubeconfig.key=kubeconfig
|
||||
```
|
||||
--set metricsServer=http://prometheus:9090
|
||||
```
|
||||
@@ -43,8 +43,8 @@ helm upgrade -i flagger flagger/flagger \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://istio-cluster-prometheus:9090 \
|
||||
--set istio.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set istio.kubeconfig.key=kubeconfig
|
||||
--set controlplane.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set controlplane.kubeconfig.key=kubeconfig
|
||||
```
|
||||
|
||||
Note that the Istio kubeconfig must be stored in a Kubernetes secret with a data key named `kubeconfig`.
|
||||
@@ -90,6 +90,7 @@ For ingress controllers, the install instructions are:
|
||||
* [NGINX](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
|
||||
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
|
||||
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
|
||||
* [APISIX](https://docs.flagger.app/tutorials/apisix-progressive-delivery)
|
||||
|
||||
You can use the helm template command and apply the generated yaml with kubectl:
|
||||
|
||||
@@ -199,7 +200,7 @@ kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=v1.0.0 |
|
||||
|
||||
**Generic installer**
|
||||
|
||||
Install Flagger and Prometheus for Contour, Gloo, NGINX, Skipper, or Traefik ingress:
|
||||
Install Flagger and Prometheus for Contour, Gloo, NGINX, Skipper, APISIX or Traefik ingress:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main | kubectl apply -f -
|
||||
@@ -220,7 +221,7 @@ metadata:
|
||||
name: app
|
||||
namespace: test
|
||||
spec:
|
||||
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik, osm
|
||||
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik, osm, apisix
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: nginx
|
||||
```
|
||||
|
||||
158
docs/gitbook/install/flagger-install-with-flux.md
Normal file
158
docs/gitbook/install/flagger-install-with-flux.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# Flagger Install on Kubernetes with Flux
|
||||
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster the GitOps way.
|
||||
You'll configure Flux to scan the Flagger OCI artifacts and deploy the
|
||||
latest stable version on Kubernetes.
|
||||
|
||||
## Flagger OCI artifacts
|
||||
|
||||
Flagger OCI artifacts (container images, Helm charts, Kustomize overlays) are published to
|
||||
GitHub Container Registry, and they are signed with Cosign at every release.
|
||||
|
||||
OCI artifacts
|
||||
|
||||
- `ghcr.io/fluxcd/flagger:<version>` multi-arch container images
|
||||
- `ghcr.io/fluxcd/flagger-manifest:<version>` Kubernetes manifests
|
||||
- `ghcr.io/fluxcd/charts/flagger:<version>` Helm charts
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To follow this guide you’ll need a Kubernetes cluster with Flux installed on it.
|
||||
Please see the Flux [get started guide](https://fluxcd.io/flux/get-started/)
|
||||
or the Flux [installation guide](https://fluxcd.io/flux/installation/).
|
||||
|
||||
## Deploy Flagger with Flux
|
||||
|
||||
First define the namespace where Flagger will be installed:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: flagger-system
|
||||
labels:
|
||||
toolkit.fluxcd.io/tenant: sre-team
|
||||
```
|
||||
|
||||
Define a Flux `HelmRepository` that points to where the Flagger Helm charts are stored:
|
||||
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 1h
|
||||
url: oci://ghcr.io/fluxcd/charts
|
||||
type: oci
|
||||
```
|
||||
|
||||
Define a Flux `HelmRelease` that verifies and installs Flagger's latest version on the cluster:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: flagger-system
|
||||
spec:
|
||||
interval: 1h
|
||||
releaseName: flagger
|
||||
install: # override existing Flagger CRDs
|
||||
crds: CreateReplace
|
||||
upgrade: # update Flagger CRDs
|
||||
crds: CreateReplace
|
||||
chart:
|
||||
spec:
|
||||
chart: flagger
|
||||
version: 1.x # update Flagger to the latest minor version
|
||||
interval: 6h # scan for new versions every six hours
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: flagger
|
||||
verify: # verify the chart signature with Cosign keyless
|
||||
provider: cosign
|
||||
values:
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
```
|
||||
|
||||
Copy the above manifests into a file called `flagger.yaml`, place the YAML file
|
||||
in the Git repository bootstrapped with Flux, then commit and push it to upstream.
|
||||
|
||||
After Flux reconciles the changes on your cluster, you can check if Flagger got deployed with:
|
||||
|
||||
```console
|
||||
$ helm list -n flagger-system
|
||||
NAME NAMESPACE REVISION STATUS CHART APP VERSION
|
||||
flagger flagger-system 1 deployed flagger-1.23.0 1.23.0
|
||||
```
|
||||
|
||||
To uninstall Flagger, delete the `flagger.yaml` from your repository, then Flux will uninstall
|
||||
the Helm release and will remove the namespace from your cluster.
|
||||
|
||||
## Deploy Flagger load tester with Flux
|
||||
|
||||
Flagger comes with a load testing service that generates traffic during analysis when configured as a webhook.
|
||||
|
||||
The load tester container images and deployment manifests are published to GitHub Container Registry.
|
||||
The container images and the manifests are signed with Cosign and GitHub Actions OIDC.
|
||||
|
||||
Assuming the applications managed by Flagger are in the `apps` namespace, you can configure Flux to
|
||||
deploy the load tester there.
|
||||
|
||||
Define a Flux `OCIRepository` that points to where the Flagger Kustomize overlays are stored:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: OCIRepository
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: apps
|
||||
spec:
|
||||
interval: 6h # scan for new versions every six hours
|
||||
url: oci://ghcr.io/fluxcd/flagger-manifests
|
||||
ref:
|
||||
semver: 1.x # update to the latest version
|
||||
verify: # verify the artifact signature with Cosign keyless
|
||||
provider: cosign
|
||||
```
|
||||
|
||||
Define a Flux `Kustomization` that deploys the Flagger load tester to the `apps` namespace:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: apps
|
||||
spec:
|
||||
interval: 6h
|
||||
wait: true
|
||||
timeout: 5m
|
||||
prune: true
|
||||
sourceRef:
|
||||
kind: OCIRepository
|
||||
name: flagger-loadtester
|
||||
path: ./tester
|
||||
targetNamespace: apps
|
||||
```
|
||||
|
||||
Copy the above manifests into a file called `flagger-loadtester.yaml`, place the YAML file
|
||||
in the Git repository bootstrapped with Flux, then commit and push it to upstream.
|
||||
|
||||
After Flux reconciles the changes on your cluster, you can check if the load tester got deployed with:
|
||||
|
||||
```console
|
||||
$ flux -n apps get kustomization flagger-loadtester
|
||||
NAME READY MESSAGE
|
||||
flagger-loadtester True Applied revision: v1.23.0/a80af71e001
|
||||
```
|
||||
|
||||
To uninstall the load tester, delete the `flagger-loadtester.yaml` from your repository,
|
||||
and Flux will delete the load tester deployment from the cluster.
|
||||
351
docs/gitbook/tutorials/apisix-progressive-delivery.md
Normal file
351
docs/gitbook/tutorials/apisix-progressive-delivery.md
Normal file
@@ -0,0 +1,351 @@
|
||||
# Apache APISIX Canary Deployments
|
||||
|
||||
This guide shows you how to use the [Apache APISIX](https://apisix.apache.org/) and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and Apache APISIX **v2.15** or newer and Apache APISIX Ingress Controller **v1.5.0** or newer.
|
||||
|
||||
Install Apache APISIX and Apache APISIX Ingress Controller with Helm v3:
|
||||
|
||||
```bash
|
||||
helm repo add apisix https://charts.apiseven.com
|
||||
kubectl create ns apisix
|
||||
|
||||
helm upgrade -i apisix apisix/apisix --version=0.11.3 \
|
||||
--namespace apisix \
|
||||
--set apisix.podAnnotations."prometheus\.io/scrape"=true \
|
||||
--set apisix.podAnnotations."prometheus\.io/port"=9091 \
|
||||
--set apisix.podAnnotations."prometheus\.io/path"=/apisix/prometheus/metrics \
|
||||
--set pluginAttrs.prometheus.export_addr.ip=0.0.0.0 \
|
||||
--set pluginAttrs.prometheus.export_addr.port=9091 \
|
||||
--set pluginAttrs.prometheus.export_uri=/apisix/prometheus/metrics \
|
||||
--set pluginAttrs.prometheus.metric_prefix=apisix_ \
|
||||
--set ingress-controller.enabled=true \
|
||||
--set ingress-controller.config.apisix.serviceNamespace=apisix
|
||||
```
|
||||
|
||||
Install Flagger and the Prometheus add-on in the same namespace as Apache APISIX:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace apisix \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=apisix
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and an ApisixRoute\). These objects expose the application outside the cluster and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create an Apache APISIX `ApisixRoute`, Flagger will reference and generate the canary Apache APISIX `ApisixRoute` \(replace `app.example.com` with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: apisix.apache.org/v2
|
||||
kind: ApisixRoute
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
http:
|
||||
- backends:
|
||||
- serviceName: podinfo
|
||||
servicePort: 80
|
||||
match:
|
||||
hosts:
|
||||
- app.example.com
|
||||
methods:
|
||||
- GET
|
||||
paths:
|
||||
- /*
|
||||
name: method
|
||||
plugins:
|
||||
- name: prometheus
|
||||
enable: true
|
||||
config:
|
||||
disable: false
|
||||
prefer_name: true
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-apisixroute.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-apisixroute.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource \(replace `app.example.com` with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: apisix
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# apisix route reference
|
||||
routeRef:
|
||||
apiVersion: apisix.apache.org/v2
|
||||
kind: ApisixRoute
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# ClusterIP port number
|
||||
port: 80
|
||||
# container port number or name
|
||||
targetPort: 9898
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
# APISIX Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# builtin Prometheus check
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
type: rollout
|
||||
metadata:
|
||||
cmd: |-
|
||||
hey -z 1m -q 10 -c 2 -h2 -host app.example.com http://apisix-gateway.apisix/api/info
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
apisixroute/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
apisixroute/podinfo-podinfo-canary
|
||||
```
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Conditions:
|
||||
Message: Canary analysis completed successfully, promotion finished.
|
||||
Reason: Succeeded
|
||||
Status: True
|
||||
Type: Promoted
|
||||
Failed Checks: 1
|
||||
Iterations: 0
|
||||
Phase: Succeeded
|
||||
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Warning Synced 2m59s flagger podinfo-primary.test not ready: waiting for rollout to finish: observed deployment generation less than desired generation
|
||||
Warning Synced 2m50s flagger podinfo-primary.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available
|
||||
Normal Synced 2m40s (x3 over 2m59s) flagger all the metrics providers are available!
|
||||
Normal Synced 2m39s flagger Initialization done! podinfo.test
|
||||
Normal Synced 2m20s flagger New revision detected! Scaling up podinfo.test
|
||||
Warning Synced 2m (x2 over 2m10s) flagger canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available
|
||||
Normal Synced 110s flagger Starting canary analysis for podinfo.test
|
||||
Normal Synced 109s flagger Advance podinfo.test canary weight 10
|
||||
Warning Synced 100s flagger Halt advancement no values found for apisix metric request-success-rate probably podinfo.test is not receiving traffic: running query failed: no values found
|
||||
Normal Synced 90s flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 80s flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 69s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 59s flagger Advance podinfo.test canary weight 50
|
||||
Warning Synced 30s (x2 over 40s) flagger podinfo-primary.test not ready: waiting for rollout to finish: 1 old replicas are pending termination
|
||||
Normal Synced 9s (x3 over 50s) flagger (combined from similar events): Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo-2 Progressing 10 2022-11-23T05:00:54Z
|
||||
test podinfo Succeeded 0 2022-11-23T06:00:54Z
|
||||
```
|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 -host app.example.com http://apisix-gateway.apisix/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl -H \"host: app.example.com\" http://apisix-gateway.apisix/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n apisix logs deploy/flagger -f | jq .msg
|
||||
|
||||
"New revision detected! Scaling up podinfo.test"
|
||||
"canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available"
|
||||
"Starting canary analysis for podinfo.test"
|
||||
"Advance podinfo.test canary weight 10"
|
||||
"Halt podinfo.test advancement success rate 0.00% < 99%"
|
||||
"Halt podinfo.test advancement success rate 26.76% < 99%"
|
||||
"Halt podinfo.test advancement success rate 34.19% < 99%"
|
||||
"Halt podinfo.test advancement success rate 37.32% < 99%"
|
||||
"Halt podinfo.test advancement success rate 39.04% < 99%"
|
||||
"Halt podinfo.test advancement success rate 40.13% < 99%"
|
||||
"Halt podinfo.test advancement success rate 48.28% < 99%"
|
||||
"Halt podinfo.test advancement success rate 50.35% < 99%"
|
||||
"Halt podinfo.test advancement success rate 56.92% < 99%"
|
||||
"Halt podinfo.test advancement success rate 67.70% < 99%"
|
||||
"Rolling back podinfo.test failed checks threshold reached 10"
|
||||
"Canary failed! Scaling down podinfo.test"
|
||||
```
|
||||
|
||||
## Custom metrics
|
||||
|
||||
The canary analysis can be extended with Prometheus queries.
|
||||
|
||||
Create a metric template and apply it on the cluster:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: not-found-percentage
|
||||
namespace: test
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://flagger-prometheus.apisix:9090
|
||||
query: |
|
||||
sum(
|
||||
rate(
|
||||
apisix_http_status{
|
||||
route=~"{{ namespace }}_{{ route }}-{{ target }}-canary_.+",
|
||||
code!~"4.."
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
apisix_http_status{
|
||||
route=~"{{ namespace }}_{{ route }}-{{ target }}-canary_.+"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) * 100
|
||||
```
|
||||
|
||||
Edit the canary analysis and add the not found error rate check:
|
||||
|
||||
```yaml
|
||||
analysis:
|
||||
metrics:
|
||||
- name: "404s percentage"
|
||||
templateRef:
|
||||
name: not-found-percentage
|
||||
thresholdRange:
|
||||
max: 5
|
||||
interval: 1m
|
||||
```
|
||||
|
||||
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
|
||||
|
||||
|
||||
The above procedures can be extended with more [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
|
||||
@@ -62,6 +62,9 @@ Create a canary definition:
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
annotations:
|
||||
# Enable Envoy access logging to stdout.
|
||||
appmesh.flagger.app/accesslog: enabled
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
@@ -168,7 +171,7 @@ virtualservice.appmesh.k8s.aws/podinfo
|
||||
virtualservice.appmesh.k8s.aws/podinfo-canary
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
|
||||
will be routed to the primary pods.
|
||||
During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
|
||||
@@ -90,6 +90,8 @@ spec:
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
# supported values for retryOn - https://projectcontour.io/docs/main/config/api/#projectcontour.io/v1.RetryOn
|
||||
retryOn: "5xx"
|
||||
# define the canary analysis timing and KPIs
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
@@ -157,7 +159,7 @@ service/podinfo-primary
|
||||
httpproxy.projectcontour.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Expose the app outside the cluster
|
||||
|
||||
|
||||
@@ -1,23 +1,24 @@
|
||||
# Gateway API Canary Deployments
|
||||
|
||||
This guide shows you how to use Gateway API and Flagger to automate canary deployments.
|
||||
This guide shows you how to use [Gateway API](https://gateway-api.sigs.k8s.io/) and Flagger to automate canary deployments and A/B testing.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer and any mesh/ingress that implements the `v1alpha2` of Gateway API. We'll be using Contour for the sake of this tutorial, but you can use any other implementation.
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and any mesh/ingress that implements the `v1beta1` version of Gateway API. We'll be using Contour for the sake of this tutorial, but you can use any other implementation.
|
||||
|
||||
Install the GatewayAPI CRDs:
|
||||
> Note: Flagger supports `v1alpha2` version of Gateway API, but the alpha version has been deprecated and support will be dropped in a future release.
|
||||
|
||||
Install Contour, its Gateway provisioner and Gateway API CRDs in the `projectcontour` namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/kubernetes-sigs/gateway-api/config/crd?ref=v0.4.1
|
||||
https://raw.githubusercontent.com/projectcontour/contour/release-1.23/examples/render/contour-gateway-provisioner.yaml
|
||||
```
|
||||
|
||||
Install a cluster-wide GatewayClass; a Gateway belonging to the GatewayClass and Contour components in the `projectcontour` namespace:
|
||||
|
||||
> Alternatively, you can also install the Gateway API CRDs from the upstream project:
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/projectcontour/contour/release-1.20/examples/render/contour.yaml
|
||||
kubectl apply -k github.com/kubernetes-sigs/gateway-api/config/crd?ref=v0.6.0
|
||||
```
|
||||
|
||||
Install Flagger in the `flagger-system` namespace:
|
||||
@@ -26,6 +27,36 @@ Install Flagger in the `flagger-system` namespace:
|
||||
kubectl apply -k github.com/fluxcd/flagger//kustomize/gatewayapi
|
||||
```
|
||||
|
||||
Create a `GatewayClass` that specifies information about the Gateway controller:
|
||||
|
||||
```yaml
|
||||
kind: GatewayClass
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: contour
|
||||
spec:
|
||||
controllerName: projectcontour.io/gateway-controller
|
||||
```
|
||||
|
||||
Create a `Gateway` that configures load balancing, traffic ACL, etc:
|
||||
|
||||
```yaml
|
||||
kind: Gateway
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: contour
|
||||
namespace: projectcontour
|
||||
spec:
|
||||
gatewayClassName: contour
|
||||
listeners:
|
||||
- name: http
|
||||
protocol: HTTP
|
||||
port: 80
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services, HTTPRoutes for the Gateway\). These objects expose the application inside the mesh and drive the canary analysis and promotion.
|
||||
|
||||
@@ -292,6 +292,118 @@ Events:
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
## Session Affinity
|
||||
|
||||
While Flagger can perform weighted routing and A/B testing individually, with Istio it can combine the two leading to a Canary
|
||||
release with session affinity. For more information you can read the [deployment strategies docs](../usage/deployment-strategies.md#canary-release-with-session-affinity).
|
||||
|
||||
Create a canary custom resource \(replace app.example.com with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
# Istio retry policy (optional)
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 1s
|
||||
retryOn: "gateway-error,connect-failure,refused-stream"
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
# session affinity config
|
||||
sessionAffinity:
|
||||
# name of the cookie used
|
||||
cookieName: flagger-cookie
|
||||
# max age of the cookie (in seconds)
|
||||
# optional; defaults to 86400
|
||||
maxAge: 21600
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary-session-affinity.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary-session-affinity.yaml
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
You can load `app.example.com` in your browser and refresh it until you see the requests being served by `podinfo:6.0.1`.
|
||||
All subsequent requests after that will be served by `podinfo:6.0.1` and not `podinfo:6.0.0` because of the session affinity
|
||||
configured by Flagger with Istio.
|
||||
|
||||
## Traffic mirroring
|
||||
|
||||

|
||||
|
||||
242
docs/gitbook/tutorials/keda-scaledobject.md
Normal file
242
docs/gitbook/tutorials/keda-scaledobject.md
Normal file
@@ -0,0 +1,242 @@
|
||||
# Canary analysis with KEDA ScaledObjects
|
||||
|
||||
This guide shows you how to use Flagger with KEDA ScaledObjects to autoscale workloads during a Canary analysis run.
|
||||
We will be using a Blue/Green deployment strategy with the Kubernetes provider for the sake of this tutorial, but
|
||||
you can use any deployment strategy combined with any supported provider.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer. For this tutorial, we'll need KEDA **2.7.1** or newer.
|
||||
|
||||
Install KEDA:
|
||||
|
||||
```bash
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
kubectl create namespace keda
|
||||
helm install keda kedacore/keda --namespace keda
|
||||
```
|
||||
|
||||
Install Flagger:
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace flagger \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=kubernetes
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and a KEDA ScaledObject targeting the deployment. It then creates a series of objects
|
||||
(Kubernetes deployments, ClusterIP services and another KEDA ScaledObject targeting the created Deployment).
|
||||
These objects expose the application inside the mesh and drive the Canary analysis and Blue/Green promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment named `podinfo`:
|
||||
|
||||
```bash
|
||||
kubectl apply -n test -f https://raw.githubusercontent.com/fluxcd/flagger/main/kustomize/podinfo/deployment.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
```
|
||||
|
||||
Create a ScaledObject which targets the `podinfo` deployment and uses Prometheus as a trigger:
|
||||
```yaml
|
||||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: podinfo-so
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
name: podinfo
|
||||
pollingInterval: 10
|
||||
cooldownPeriod: 20
|
||||
minReplicaCount: 1
|
||||
maxReplicaCount: 3
|
||||
triggers:
|
||||
- type: prometheus
|
||||
metadata:
|
||||
name: prom-trigger
|
||||
serverAddress: http://flagger-prometheus.flagger-system:9090
|
||||
metricName: http_requests_total
|
||||
query: sum(rate(http_requests_total{ app="podinfo" }[30s]))
|
||||
threshold: '5'
|
||||
```
|
||||
|
||||
Create a canary custom resource for the `podinfo` deployment:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: kubernetes
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# Scaler reference
|
||||
autoscalerRef:
|
||||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
# ScaledObject targeting the canary deployment
|
||||
name: podinfo-so
|
||||
# Mapping between trigger names and the related query to use for the generated
|
||||
# ScaledObject targeting the primary deployment. (Optional)
|
||||
primaryScalerQueries:
|
||||
prom-trigger: sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))
|
||||
# Overriding replica scaling configuration for the generated ScaledObject
|
||||
# targeting the primary deployment. (Optional)
|
||||
primaryScalerReplicas:
|
||||
minReplicas: 2
|
||||
maxReplicas: 5
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
port: 80
|
||||
targetPort: 9898
|
||||
name: podinfo-svc
|
||||
portDiscovery: true
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 15s
|
||||
# max number of failed checks before rollback
|
||||
threshold: 5
|
||||
# number of checks to run before promotion
|
||||
iterations: 5
|
||||
# Prometheus checks based on
|
||||
# http_request_duration_seconds histogram
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
interval: 1m
|
||||
thresholdRange:
|
||||
min: 99
|
||||
- name: request-duration
|
||||
interval: 30s
|
||||
thresholdRange:
|
||||
max: 500
|
||||
# load testing hooks
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 2m -q 20 -c 2 http://podinfo-svc-canary.test/"
|
||||
```
|
||||
|
||||
Save the above resource as `podinfo-canary.yaml` and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
scaledobject.keda.sh/podinfo-so
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
scaledobject.keda.sh/podinfo-so-primary
|
||||
```
|
||||
|
||||
We refer to our ScaledObject for the canary deployment using `.spec.autoscalerRef`. Flagger will use this to generate a ScaledObject which will scale the primary deployment.
|
||||
By default, Flagger will try to guess the query to use for the primary ScaledObject, by replacing all mentions of `.spec.targetRef.Name` and `{.spec.targetRef.Name}-canary`
|
||||
with `{.spec.targetRef.Name}-primary`, for all triggers.
|
||||
For example, if your ScaledObject has a trigger query defined as: `sum(rate(http_requests_total{ app="podinfo" }[30s]))` or `sum(rate(http_requests_total{ app="podinfo-canary" }[30s]))`, then the primary ScaledObject will have the same trigger with a query defined as `sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))`.
|
||||
|
||||
If the generated query does not meet your requirements, you can specify the query for autoscaling the primary deployment explicitly using
|
||||
`.spec.autoscalerRef.primaryScalerQueries`, which lets you define a query for each trigger. Please note that, your ScaledObject's `.spec.triggers[@].name` must
|
||||
not be blank, as Flagger needs that to identify each trigger uniquely.
|
||||
|
||||
If you want the generated primary ScaledObject to use a different replica scaling configuration than the canary ScaledObject, you can use
|
||||
the `.spec.autoscalerRef.primaryScalerReplicas` to override these values for the generated primary ScaledObject.
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. To keep the podinfo deployment
|
||||
at 0 replicas and pause auto scaling, Flagger will add an annotation to your ScaledObject: `autoscaling.keda.sh/paused-replicas: 0`.
|
||||
During the canary analysis, the annotation is removed, to enable auto scaling for the podinfo deployment.
|
||||
The `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The Blue/Green deployment will run for five iterations while validating the HTTP metrics and rollout hooks every 15 seconds.
|
||||
|
||||
|
||||
## Automated Blue/Green promotion
|
||||
|
||||
Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Events:
|
||||
|
||||
New revision detected podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 100 2019-06-16T14:05:07Z
|
||||
```
|
||||
|
||||
You can monitor the scaling of the deployments with:
|
||||
```bash
|
||||
watch kubectl -n test get deploy podinfo
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
flagger-loadtester 1/1 1 1 4m21s
|
||||
podinfo 3/3 3 3 4m28s
|
||||
podinfo-primary 3/3 3 3 3m14s
|
||||
```
|
||||
|
||||
You can monitor how Flagger edits the annotations of your ScaledObject with:
|
||||
```bash
|
||||
watch "kubectl get -n test scaledobjects podinfo-so -o=jsonpath='{.metadata.annotations}'"
|
||||
```
|
||||
@@ -59,7 +59,8 @@ kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
Deploy the load testing service to generate traffic during the analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create a canary custom resource:
|
||||
|
||||
@@ -6,13 +6,13 @@ This guide shows you how to use Kuma and Flagger to automate canary deployments.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer and Kuma **1.3** or newer.
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and Kuma **1.7** or newer.
|
||||
|
||||
Install Kuma and Prometheus (part of Kuma Metrics):
|
||||
|
||||
```bash
|
||||
kumactl install control-plane | kubectl apply -f -
|
||||
kumactl install metrics | kubectl apply -f -
|
||||
kumactl install observability --components "grafana,prometheus" | kubectl apply -f -
|
||||
```
|
||||
|
||||
Install Flagger in the `kuma-system` namespace:
|
||||
@@ -133,7 +133,7 @@ service/podinfo-primary
|
||||
trafficroutes.kuma.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
|
||||
@@ -8,19 +8,55 @@ This guide shows you how to use Linkerd and Flagger to automate canary deploymen
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer and Linkerd **2.10** or newer.
|
||||
|
||||
Install Linkerd the Promethues (part of Linkerd Viz):
|
||||
Install Linkerd and Prometheus (part of Linkerd Viz):
|
||||
|
||||
```bash
|
||||
# For linkerd versions 2.12 and later, the CRDs need to be installed beforehand
|
||||
linkerd install --crds | kubectl apply -f -
|
||||
|
||||
linkerd install | kubectl apply -f -
|
||||
linkerd viz install | kubectl apply -f -
|
||||
|
||||
# For linkerd versions 2.12 and later, the SMI extension needs to be install in
|
||||
# order to enable TrafficSplits
|
||||
curl -sL https://linkerd.github.io/linkerd-smi/install | sh
|
||||
linkerd smi install | kubectl apply -f -
|
||||
```
|
||||
|
||||
Install Flagger in the linkerd namespace:
|
||||
Install Flagger in the flagger-system namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/fluxcd/flagger//kustomize/linkerd
|
||||
```
|
||||
|
||||
If you prefer Helm, these are the commands to install Linkerd, Linkerd Viz,
|
||||
Linkerd-SMI and Flagger:
|
||||
|
||||
```bash
|
||||
helm repo add linkerd https://helm.linkerd.io/stable
|
||||
helm install linkerd-crds linkerd/linkerd-crds -n linkerd --create-namespace
|
||||
# See https://linkerd.io/2/tasks/generate-certificates/ for how to generate the
|
||||
# certs referred below
|
||||
helm install linkerd-control-plane linkerd/linkerd-control-plane \
|
||||
-n linkerd \
|
||||
--set-file identityTrustAnchorsPEM=ca.crt \
|
||||
--set-file identity.issuer.tls.crtPEM=issuer.crt \
|
||||
--set-file identity.issuer.tls.keyPEM=issuer.key \
|
||||
|
||||
helm install linkerd-viz linkerd/linkerd-viz -n linkerd-viz --create-namespace
|
||||
|
||||
helm repo add l5d-smi https://linkerd.github.io/linkerd-smi
|
||||
helm install linkerd-smi l5d-smi/linkerd-smi -n linkerd-smi --create-namespace
|
||||
|
||||
# Note that linkerdAuthPolicy.create=true is only required for Linkerd 2.12 and
|
||||
# later
|
||||
helm install flagger flagger/flagger \
|
||||
--n flagger-system \
|
||||
--set meshProvider=linkerd \
|
||||
--set metricsServer=http://prometheus.linkerd-viz:9090 \
|
||||
--set linkerdAuthPolicy.create=true
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
@@ -140,7 +176,7 @@ service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
@@ -309,7 +345,7 @@ watch -n 1 curl http://podinfo-canary:9898/status/404
|
||||
Watch Flagger logs:
|
||||
|
||||
```text
|
||||
kubectl -n linkerd logs deployment/flagger -f | jq .msg
|
||||
kubectl -n flagger-system logs deployment/flagger -f | jq .msg
|
||||
|
||||
Starting canary deployment for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
|
||||
@@ -166,7 +166,7 @@ service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the `podinfo` deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods.
|
||||
After the bootstrap, the `podinfo` deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods.
|
||||
During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Automated Canary Promotion
|
||||
|
||||
@@ -37,6 +37,8 @@ or if the analysis reached the maximum number of failed checks:
|
||||
|
||||

|
||||
|
||||
For using a Slack bot token, you should add `token` to a secret and use **secretRef**.
|
||||
|
||||
### Microsoft Teams
|
||||
|
||||
Flagger can be configured to send notifications to Microsoft Teams:
|
||||
@@ -73,6 +75,7 @@ spec:
|
||||
channel: on-call-alerts
|
||||
username: flagger
|
||||
# webhook address (ignored if secretRef is specified)
|
||||
# or https://slack.com/api/chat.postMessage if you use token in the secret
|
||||
address: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
|
||||
# optional http/s proxy
|
||||
proxy: http://my-http-proxy.com
|
||||
@@ -87,6 +90,7 @@ metadata:
|
||||
namespace: flagger
|
||||
data:
|
||||
address: <encoded-url>
|
||||
token: <encoded-token>
|
||||
```
|
||||
|
||||
The alert provider **type** can be: `slack`, `msteams`, `rocket` or `discord`. When set to `discord`,
|
||||
|
||||
@@ -3,13 +3,15 @@
|
||||
Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
|
||||
|
||||
* **Canary Release** \(progressive traffic shifting\)
|
||||
* Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik, Open Service Mesh, Kuma, Gateway API
|
||||
* Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik, Open Service Mesh, Kuma, Gateway API, Apache APISIX
|
||||
* **A/B Testing** \(HTTP headers and cookies traffic routing\)
|
||||
* Istio, App Mesh, NGINX, Contour, Gloo Edge, Gateway API
|
||||
* **Blue/Green** \(traffic switching\)
|
||||
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge, Open Service Mesh, Gateway API
|
||||
* **Blue/Green Mirroring** \(traffic shadowing\)
|
||||
* Istio
|
||||
* **Canary Release with Session Affinity** \(progressive traffic shifting combined with cookie based routing\)
|
||||
* Istio
|
||||
|
||||
For Canary releases and A/B testing you'll need a Layer 7 traffic management solution like
|
||||
a service mesh or an ingress controller. For Blue/Green deployments no service mesh or ingress controller is required.
|
||||
@@ -393,3 +395,59 @@ After the analysis finishes, the traffic is routed to the canary (green) before
|
||||
triggering the primary (blue) rolling update, this ensures a smooth transition
|
||||
to the new version avoiding dropping in-flight requests during the Kubernetes deployment rollout.
|
||||
|
||||
## Canary Release with Session Affinity
|
||||
|
||||
This deployment strategy mixes a Canary Release with A/B testing. A Canary Release is helpful when
|
||||
we're trying to expose new features to users progressively, but because of the very nature of its
|
||||
routing (weight based), users can land on the application's old version even after they have been
|
||||
routed to the new version previously. This can be annoying, or worse break how other services interact
|
||||
with our application. To address this issue, we borrow some things from A/B testing.
|
||||
|
||||
Since A/B testing is particularly helpful for applications that require session affinity, we integrate
|
||||
cookie based routing with regular weight based routing. This means once a user is exposed to the new
|
||||
version of our application (based on the traffic weights), they're always routed to that version, i.e.
|
||||
they're never routed back to the old version of our application.
|
||||
|
||||
You can enable this, by specifying `.spec.analysis.sessionAffinity` in the Canary (only Istio is supported):
|
||||
|
||||
```yaml
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 2
|
||||
# session affinity config
|
||||
sessionAffinity:
|
||||
# name of the cookie used
|
||||
cookieName: flagger-cookie
|
||||
# max age of the cookie (in seconds)
|
||||
# optional; defaults to 86400
|
||||
maxAge: 21600
|
||||
```
|
||||
|
||||
`.spec.analysis.sessionAffinity.cookieName` is the name of the Cookie that is stored. The value of the
|
||||
cookie is a randomly generated string of characters that act as a unique identifier. For the above
|
||||
config, the response header of a request routed to the canary deployment during a Canary run will look like:
|
||||
```
|
||||
Set-Cookie: flagger-cookie=LpsIaLdoNZ; Max-Age=21600
|
||||
```
|
||||
|
||||
After a Canary run is over and all traffic is shifted back to the primary deployment, all responses will
|
||||
have the following header:
|
||||
```
|
||||
Set-Cookie: flagger-cookie=LpsIaLdoNZ; Max-Age=-1
|
||||
```
|
||||
This tells the client to delete the cookie, making sure there are no junk cookies lying around in the user's
|
||||
system.
|
||||
|
||||
If a new Canary run is triggered, the response header will set a new cookie for all requests routed to
|
||||
the Canary deployment:
|
||||
```
|
||||
Set-Cookie: flagger-cookie=McxKdLQoIN; Max-Age=21600
|
||||
```
|
||||
|
||||
@@ -68,6 +68,9 @@ spec:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
primaryScalerReplicas:
|
||||
minReplicas: 2
|
||||
maxReplicas: 5
|
||||
```
|
||||
|
||||
Based on the above configuration, Flagger generates the following Kubernetes objects:
|
||||
@@ -80,6 +83,11 @@ by default all traffic is routed to this version and the target deployment is sc
|
||||
Flagger will detect changes to the target deployment (including secrets and configmaps)
|
||||
and will perform a canary analysis before promoting the new version as primary.
|
||||
|
||||
Use `.spec.autoscalerRef.primaryScalerReplicas` to override the replica scaling
|
||||
configuration for the generated primary HorizontalPodAutoscaler. This is useful
|
||||
for situations when you want to have a different scaling configuration for the
|
||||
primary workload as opposed to using the same values from the original workload HorizontalPodAutoscaler.
|
||||
|
||||
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
|
||||
|
||||
```yaml
|
||||
@@ -120,6 +128,8 @@ in the primary autoscaler when a rollout for the deployment starts and completes
|
||||
Optionally, you can create two HPAs, one for canary and one for the primary to update the HPA without
|
||||
doing a new rollout. As the canary deployment will be scaled to 0, the HPA on the canary will be inactive.
|
||||
|
||||
**Note** Flagger requires `autoscaling/v2` or `autoscaling/v2beta2` API version for HPAs.
|
||||
|
||||
The progress deadline represents the maximum time in seconds for the canary deployment to
|
||||
make progress before it is rolled back, defaults to ten minutes.
|
||||
|
||||
@@ -134,6 +144,7 @@ spec:
|
||||
name: podinfo
|
||||
port: 9898
|
||||
portName: http
|
||||
appProtocol: http
|
||||
targetPort: 9898
|
||||
portDiscovery: true
|
||||
```
|
||||
@@ -142,6 +153,7 @@ The container port from the target workload should match the `service.port` or `
|
||||
The `service.name` is optional, defaults to `spec.targetRef.name`.
|
||||
The `service.targetPort` can be a container port number or name.
|
||||
The `service.portName` is optional (defaults to `http`), if your workload uses gRPC then set the port name to `grpc`.
|
||||
The `service.appProtocol` is optional, more details can be found [here](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol).
|
||||
|
||||
If port discovery is enabled, Flagger scans the target workload and extracts the containers ports
|
||||
excluding the port specified in the canary service and service mesh sidecar ports.
|
||||
|
||||
@@ -29,7 +29,7 @@ Flagger comes with two builtin metric checks: HTTP request success rate and dura
|
||||
|
||||
For each metric you can specify a range of accepted values with `thresholdRange` and
|
||||
the window size or the time series with `interval`.
|
||||
The builtin checks are available for every service mesh / ingress controlle
|
||||
The builtin checks are available for every service mesh / ingress controller
|
||||
and are implemented with [Prometheus queries](../faq.md#metrics).
|
||||
|
||||
## Custom metrics
|
||||
@@ -62,6 +62,7 @@ The following variables are available in query templates:
|
||||
* `service` (canary.spec.service.name)
|
||||
* `ingress` (canary.spec.ingressRef.name)
|
||||
* `interval` (canary.spec.analysis.metrics[].interval)
|
||||
* `variables` (canary.spec.analysis.metrics[].templateVariables)
|
||||
|
||||
A canary analysis metric can reference a template with `templateRef`:
|
||||
|
||||
@@ -82,6 +83,50 @@ A canary analysis metric can reference a template with `templateRef`:
|
||||
interval: 1m
|
||||
```
|
||||
|
||||
A canary analysis metric can reference a set of custom variables with `templateVariables`. These variables will be then injected into the query defined in the referred `MetricTemplate` object during canary analysis:
|
||||
|
||||
```yaml
|
||||
analysis:
|
||||
metrics:
|
||||
- name: "my metric"
|
||||
templateRef:
|
||||
name: my-metric
|
||||
namespace: flagger
|
||||
# accepted values
|
||||
thresholdRange:
|
||||
min: 10
|
||||
max: 1000
|
||||
# metric query time window
|
||||
interval: 1m
|
||||
# custom variables used within the referenced metric template
|
||||
templateVariables:
|
||||
direction: inbound
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: my-metric
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://prometheus.linkerd-viz:9090
|
||||
query: |
|
||||
histogram_quantile(
|
||||
0.99,
|
||||
sum(
|
||||
rate(
|
||||
response_latency_ms_bucket{
|
||||
namespace="{{ namespace }}",
|
||||
deployment=~"{{ target }}",
|
||||
direction="{{ variables.direction }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) by (le)
|
||||
)
|
||||
```
|
||||
|
||||
## Prometheus
|
||||
|
||||
You can create custom metric checks targeting a Prometheus server by
|
||||
@@ -184,13 +229,25 @@ as the `MetricTemplate` with the basic-auth credentials:
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: prom-basic-auth
|
||||
name: prom-auth
|
||||
namespace: flagger
|
||||
data:
|
||||
username: your-user
|
||||
password: your-password
|
||||
```
|
||||
|
||||
or if you require bearer token authentication (via a SA token):
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: prom-auth
|
||||
namespace: flagger
|
||||
data:
|
||||
token: ey1234...
|
||||
```
|
||||
|
||||
Then reference the secret in the `MetricTemplate`:
|
||||
|
||||
```yaml
|
||||
@@ -204,7 +261,7 @@ spec:
|
||||
type: prometheus
|
||||
address: http://prometheus.monitoring:9090
|
||||
secretRef:
|
||||
name: prom-basic-auth
|
||||
name: prom-auth
|
||||
```
|
||||
|
||||
## Datadog
|
||||
@@ -478,7 +535,7 @@ spec:
|
||||
name: graphite-basic-auth
|
||||
```
|
||||
|
||||
## Google CLoud Monitoring (Stackdriver)
|
||||
## Google Cloud Monitoring (Stackdriver)
|
||||
|
||||
Enable Workload Identity on your cluster, create a service account key that has read access to the
|
||||
Cloud Monitoring API and then create an IAM policy binding between the GCP service account and the Flagger
|
||||
@@ -528,18 +585,20 @@ spec:
|
||||
|
||||
The reference for the query language can be found [here](https://cloud.google.com/monitoring/mql/reference)
|
||||
|
||||
## Influxdb
|
||||
## InfluxDB
|
||||
|
||||
The influxdb provider uses the [flux](https://docs.influxdata.com/influxdb/v2.0/query-data/get-started/) scripting language.
|
||||
The InfluxDB provider uses the [flux](https://docs.influxdata.com/influxdb/v2.0/query-data/get-started/) query language.
|
||||
|
||||
Create a secret that contains your authentication token that can be gotthen from the InfluxDB UI.
|
||||
Create a secret that contains your authentication token that can be found in the InfluxDB UI.
|
||||
|
||||
```
|
||||
kubectl create secret generic gcloud-sa --from-literal=token=<token>
|
||||
kubectl create secret generic influx-token --from-literal=token=<token>
|
||||
```
|
||||
|
||||
Then reference the secret in the metric template.qq
|
||||
Then reference the secret in the metric template.
|
||||
|
||||
Note: The particular MQL query used here works if [Istio is installed on GKE](https://cloud.google.com/istio/docs/istio-on-gke/installing).
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
|
||||
@@ -117,4 +117,8 @@ flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"}
|
||||
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
|
||||
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
|
||||
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
|
||||
|
||||
# Last canary metric analysis result per different metrics
|
||||
flagger_canary_metric_analysis{metric="podinfo-http-successful-rate",name="podinfo",namespace="test"} 1
|
||||
flagger_canary_metric_analysis{metric="podinfo-custom-metric",name="podinfo",namespace="test"} 0.918223108974359
|
||||
```
|
||||
|
||||
119
go.mod
119
go.mod
@@ -1,78 +1,89 @@
|
||||
module github.com/fluxcd/flagger
|
||||
|
||||
go 1.17
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
cloud.google.com/go/monitoring v0.1.0
|
||||
github.com/Masterminds/semver/v3 v3.0.3
|
||||
github.com/aws/aws-sdk-go v1.37.32
|
||||
cloud.google.com/go/monitoring v1.13.0
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/aws/aws-sdk-go v1.44.241
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/go-logr/zapr v1.2.0
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/googleapis/gax-go/v2 v2.0.5
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.5.0
|
||||
github.com/prometheus/client_golang v1.11.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
go.uber.org/zap v1.19.1
|
||||
google.golang.org/api v0.54.0
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c
|
||||
google.golang.org/grpc v1.39.1
|
||||
google.golang.org/protobuf v1.27.1
|
||||
gopkg.in/h2non/gock.v1 v1.0.15
|
||||
k8s.io/api v0.23.3
|
||||
k8s.io/apimachinery v0.23.3
|
||||
k8s.io/client-go v0.23.3
|
||||
k8s.io/code-generator v0.23.3
|
||||
k8s.io/klog/v2 v2.40.1
|
||||
github.com/go-logr/zapr v1.2.3
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/googleapis/gax-go/v2 v2.8.0
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.12.3
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
go.uber.org/zap v1.24.0
|
||||
google.golang.org/api v0.117.0
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
|
||||
google.golang.org/grpc v1.54.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
gopkg.in/h2non/gock.v1 v1.1.2
|
||||
k8s.io/api v0.26.1
|
||||
k8s.io/apimachinery v0.26.1
|
||||
k8s.io/client-go v0.26.1
|
||||
k8s.io/code-generator v0.26.1
|
||||
k8s.io/klog/v2 v2.90.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.92.3 // indirect
|
||||
cloud.google.com/go/compute v1.19.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/go-logr/logr v1.2.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.12.4 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/gnostic v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
|
||||
github.com/imdario/mergo v0.3.5 // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
|
||||
github.com/imdario/mergo v0.3.15 // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/mod v0.5.1 // indirect
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
golang.org/x/tools v0.1.9 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.8.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/net v0.9.0 // indirect
|
||||
golang.org/x/oauth2 v0.7.0 // indirect
|
||||
golang.org/x/sys v0.7.0 // indirect
|
||||
golang.org/x/term v0.7.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.8.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c // indirect
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh
|
||||
|
||||
${CODEGEN_PKG}/generate-groups.sh all \
|
||||
github.com/fluxcd/flagger/pkg/client github.com/fluxcd/flagger/pkg/apis \
|
||||
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1 kuma:v1alpha1 gatewayapi:v1alpha2" \
|
||||
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1 kuma:v1alpha1 gatewayapi:v1alpha2 gatewayapi:v1beta1 keda:v1alpha1 apisix:v2" \
|
||||
--output-base "${TEMP_DIR}" \
|
||||
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
|
||||
|
||||
|
||||
19
hack/verify-crd.sh
Executable file
19
hack/verify-crd.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
REPO_ROOT=$(git rev-parse --show-toplevel)
|
||||
|
||||
d=$(diff ${REPO_ROOT}/artifacts/flagger/crd.yaml ${REPO_ROOT}/charts/flagger/crds/crd.yaml)
|
||||
if [[ "$d" != "" ]]; then
|
||||
echo "⨯ ${REPO_ROOT}/artifacts/flagger/crd.yaml and ${REPO_ROOT}/charts/flagger/crds/crd.yaml don't match"
|
||||
echo "$d"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
d=$(diff ${REPO_ROOT}/artifacts/flagger/crd.yaml ${REPO_ROOT}/kustomize/base/flagger/crd.yaml)
|
||||
if [[ "$d" != "" ]]; then
|
||||
echo "⨯ ${REPO_ROOT}/artifacts/flagger/crd.yaml and ${REPO_ROOT}/kustomize/base/flagger/crd.yaml don't match"
|
||||
echo "$d"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✔ CRDs verified"
|
||||
7
kustomize/apisix/kustomization.yaml
Normal file
7
kustomize/apisix/kustomization.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
bases:
|
||||
- ../base/flagger/
|
||||
- ../base/prometheus/
|
||||
resources:
|
||||
- namespace.yaml
|
||||
patchesStrategicMerge:
|
||||
- patch.yaml
|
||||
4
kustomize/apisix/namespace.yaml
Normal file
4
kustomize/apisix/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: flagger-system
|
||||
@@ -10,5 +10,5 @@ spec:
|
||||
args:
|
||||
- -log-level=info
|
||||
- -include-label-prefix=app.kubernetes.io
|
||||
- -mesh-provider=kuma
|
||||
- -mesh-provider=apisix
|
||||
- -metrics-server=http://flagger-prometheus:9090
|
||||
@@ -104,7 +104,7 @@ spec:
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
description: Scaler selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
@@ -114,8 +114,20 @@ spec:
|
||||
type: string
|
||||
enum:
|
||||
- HorizontalPodAutoscaler
|
||||
- ScaledObject
|
||||
name:
|
||||
type: string
|
||||
primaryScalerQueries:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
primaryScalerReplicas:
|
||||
type: object
|
||||
properties:
|
||||
minReplicas:
|
||||
type: number
|
||||
maxReplicas:
|
||||
type: number
|
||||
ingressRef:
|
||||
description: Ingress selector
|
||||
type: object
|
||||
@@ -129,6 +141,19 @@ spec:
|
||||
- Ingress
|
||||
name:
|
||||
type: string
|
||||
routeRef:
|
||||
description: APISIX route selector
|
||||
type: object
|
||||
required: [ "apiVersion", "kind", "name" ]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- ApisixRoute
|
||||
name:
|
||||
type: string
|
||||
upstreamRef:
|
||||
description: Gloo Upstream selector
|
||||
type: object
|
||||
@@ -158,6 +183,9 @@ spec:
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
appProtocol:
|
||||
description: Application protocol of the port
|
||||
type: string
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
x-kubernetes-int-or-string: true
|
||||
@@ -942,6 +970,11 @@ spec:
|
||||
namespace:
|
||||
description: Namespace of this metric template
|
||||
type: string
|
||||
templateVariables:
|
||||
description: Additional variables to be used in the metrics query (key-value pairs)
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
alerts:
|
||||
description: Alert list for this canary analysis
|
||||
type: array
|
||||
@@ -1012,6 +1045,18 @@ spec:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
sessionAffinity:
|
||||
description: SessionAffinity represents the session affinity settings for a canary run.
|
||||
type: object
|
||||
required: [ "cookieName" ]
|
||||
properties:
|
||||
cookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
maxAge:
|
||||
description: MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
default: 86400
|
||||
type: number
|
||||
status:
|
||||
description: CanaryStatus defines the observed state of a canary.
|
||||
type: object
|
||||
@@ -1032,27 +1077,36 @@ spec:
|
||||
- Failed
|
||||
- Terminating
|
||||
- Terminated
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
trackedConfigs:
|
||||
description: TrackedConfig of this canary
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastPromotedSpec:
|
||||
description: LastPromotedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
sessionAffinityCookie:
|
||||
description: Session affinity cookie of the current canary run
|
||||
type: string
|
||||
previousSessionAffinityCookie:
|
||||
description: Session affinity cookie of the previous canary run
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
|
||||
@@ -9,4 +9,4 @@ resources:
|
||||
images:
|
||||
- name: ghcr.io/fluxcd/flagger
|
||||
newName: ghcr.io/fluxcd/flagger
|
||||
newTag: 1.19.0
|
||||
newTag: 1.31.0
|
||||
|
||||
@@ -21,6 +21,18 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
@@ -68,6 +80,7 @@ rules:
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
- canaries/finalizers
|
||||
- metrictemplates
|
||||
- metrictemplates/status
|
||||
- alertproviders
|
||||
@@ -203,6 +216,31 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- keda.sh
|
||||
resources:
|
||||
- scaledobjects
|
||||
- scaledobjects/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apisix.apache.org
|
||||
resources:
|
||||
- apisixroutes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
serviceAccountName: flagger-prometheus
|
||||
containers:
|
||||
- name: prometheus
|
||||
image: prom/prometheus:v2.33.5
|
||||
image: prom/prometheus:v2.41.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- '--storage.tsdb.retention=2h'
|
||||
|
||||
@@ -8,7 +8,7 @@ spec:
|
||||
containers:
|
||||
- name: flagger
|
||||
args:
|
||||
- -log-level=debug
|
||||
- -log-level=info
|
||||
- -include-label-prefix=app.kubernetes.io
|
||||
- -mesh-provider=gatewayapi:v1alpha2
|
||||
- -mesh-provider=gatewayapi:v1beta1
|
||||
- -metrics-server=http://flagger-prometheus:9090
|
||||
|
||||
@@ -3,3 +3,5 @@ bases:
|
||||
- ../base/flagger/
|
||||
patchesStrategicMerge:
|
||||
- patch.yaml
|
||||
resources:
|
||||
- mesh.yaml
|
||||
|
||||
10
kustomize/kuma/mesh.yaml
Normal file
10
kustomize/kuma/mesh.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: kuma.io/v1alpha1
|
||||
kind: Mesh
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
metrics:
|
||||
enabledBackend: prometheus-1
|
||||
backends:
|
||||
- name: prometheus-1
|
||||
type: prometheus
|
||||
@@ -11,4 +11,4 @@ spec:
|
||||
- -log-level=info
|
||||
- -include-label-prefix=app.kubernetes.io
|
||||
- -mesh-provider=kuma
|
||||
- -metrics-server=http://prometheus-server.kuma-metrics:80
|
||||
- -metrics-server=http://prometheus-server.mesh-observability:80
|
||||
|
||||
14
kustomize/linkerd/authorizationpolicy.yaml
Normal file
14
kustomize/linkerd/authorizationpolicy.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: policy.linkerd.io/v1alpha1
|
||||
kind: AuthorizationPolicy
|
||||
metadata:
|
||||
namespace: linkerd-viz
|
||||
name: prometheus-admin-flagger
|
||||
spec:
|
||||
targetRef:
|
||||
group: policy.linkerd.io
|
||||
kind: Server
|
||||
name: prometheus-admin
|
||||
requiredAuthenticationRefs:
|
||||
- kind: ServiceAccount
|
||||
name: flagger
|
||||
namespace: flagger-system
|
||||
@@ -1,5 +1,18 @@
|
||||
namespace: linkerd
|
||||
namespace: flagger-system
|
||||
bases:
|
||||
- ../base/flagger/
|
||||
- namespace.yaml
|
||||
- authorizationpolicy.yaml
|
||||
patchesStrategicMerge:
|
||||
- patch.yaml
|
||||
# restore overridden namespace field
|
||||
patchesJson6902:
|
||||
- target:
|
||||
group: policy.linkerd.io
|
||||
version: v1alpha1
|
||||
kind: AuthorizationPolicy
|
||||
name: prometheus-admin-flagger
|
||||
patch: |-
|
||||
- op: replace
|
||||
path: /metadata/namespace
|
||||
value: linkerd-viz
|
||||
|
||||
6
kustomize/linkerd/namespace.yaml
Normal file
6
kustomize/linkerd/namespace.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
linkerd.io/inject: enabled
|
||||
name: flagger-system
|
||||
@@ -24,4 +24,4 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: flagger
|
||||
namespace: linkerd
|
||||
namespace: flagger-system
|
||||
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: loadtester
|
||||
image: ghcr.io/fluxcd/flagger-loadtester:0.22.0
|
||||
image: ghcr.io/fluxcd/flagger-loadtester:0.28.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
21
pkg/apis/apisix/register.go
Normal file
21
pkg/apis/apisix/register.go
Normal file
@@ -0,0 +1,21 @@
|
||||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apisix
|
||||
|
||||
const (
|
||||
GroupName = "apisix.apache.org"
|
||||
)
|
||||
21
pkg/apis/apisix/v2/doc.go
Normal file
21
pkg/apis/apisix/v2/doc.go
Normal file
@@ -0,0 +1,21 @@
|
||||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
// Package v2 is the v2 version of the API.
|
||||
// +groupName=apisix.apache.org
|
||||
package v2
|
||||
52
pkg/apis/apisix/v2/register.go
Normal file
52
pkg/apis/apisix/v2/register.go
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/fluxcd/flagger/pkg/apis/apisix"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: apisix.GroupName, Version: "v2"}
|
||||
|
||||
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
|
||||
func Kind(kind string) schema.GroupKind {
|
||||
return SchemeGroupVersion.WithKind(kind).GroupKind()
|
||||
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// Adds the list of known types to Scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&ApisixRoute{},
|
||||
&ApisixRouteList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
||||
245
pkg/apis/apisix/v2/types.go
Normal file
245
pkg/apis/apisix/v2/types.go
Normal file
@@ -0,0 +1,245 @@
|
||||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:subresource:status
|
||||
// ApisixRoute is used to define the route rules and upstreams for Apache APISIX.
|
||||
type ApisixRoute struct {
|
||||
metav1.TypeMeta `json:",inline" yaml:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
|
||||
Spec ApisixRouteSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
|
||||
Status ApisixStatus `json:"status,omitempty" yaml:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixStatus is the status report for Apisix ingress Resources
|
||||
type ApisixStatus struct {
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteSpec is the spec definition for ApisixRouteSpec.
|
||||
type ApisixRouteSpec struct {
|
||||
HTTP []ApisixRouteHTTP `json:"http,omitempty" yaml:"http,omitempty"`
|
||||
Stream []ApisixRouteStream `json:"stream,omitempty" yaml:"stream,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteHTTP represents a single route in for HTTP traffic.
|
||||
type ApisixRouteHTTP struct {
|
||||
// The rule name, cannot be empty.
|
||||
Name string `json:"name" yaml:"name"`
|
||||
// Route priority, when multiple routes contains
|
||||
// same URI path (for path matching), route with
|
||||
// higher priority will take effect.
|
||||
Priority int `json:"priority,omitempty" yaml:"priority,omitempty"`
|
||||
Timeout *UpstreamTimeout `json:"timeout,omitempty" yaml:"timeout,omitempty"`
|
||||
Match ApisixRouteHTTPMatch `json:"match,omitempty" yaml:"match,omitempty"`
|
||||
// Backends represents potential backends to proxy after the route
|
||||
// rule matched. When number of backends are more than one, traffic-split
|
||||
// plugin in APISIX will be used to split traffic based on the backend weight.
|
||||
Backends []ApisixRouteHTTPBackend `json:"backends,omitempty" yaml:"backends,omitempty"`
|
||||
Websocket bool `json:"websocket" yaml:"websocket"`
|
||||
PluginConfigName string `json:"plugin_config_name,omitempty" yaml:"plugin_config_name,omitempty"`
|
||||
Plugins []ApisixRoutePlugin `json:"plugins,omitempty" yaml:"plugins,omitempty"`
|
||||
Authentication *ApisixRouteAuthentication `json:"authentication,omitempty" yaml:"authentication,omitempty"`
|
||||
}
|
||||
|
||||
// UpstreamTimeout is settings for the read, send and connect to the upstream.
|
||||
type UpstreamTimeout struct {
|
||||
Connect metav1.Duration `json:"connect,omitempty" yaml:"connect,omitempty"`
|
||||
Send metav1.Duration `json:"send,omitempty" yaml:"send,omitempty"`
|
||||
Read metav1.Duration `json:"read,omitempty" yaml:"read,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteHTTPBackend represents a HTTP backend (a Kuberentes Service).
|
||||
type ApisixRouteHTTPBackend struct {
|
||||
// The name (short) of the service, note cross namespace is forbidden,
|
||||
// so be sure the ApisixRoute and Service are in the same namespace.
|
||||
ServiceName string `json:"serviceName" yaml:"serviceName"`
|
||||
// The service port, could be the name or the port number.
|
||||
ServicePort intstr.IntOrString `json:"servicePort" yaml:"servicePort"`
|
||||
// The resolve granularity, can be "endpoints" or "service",
|
||||
// when set to "endpoints", the pod ips will be used; other
|
||||
// wise, the service ClusterIP or ExternalIP will be used,
|
||||
// default is endpoints.
|
||||
ResolveGranularity string `json:"resolveGranularity,omitempty" yaml:"resolveGranularity,omitempty"`
|
||||
// Weight of this backend.
|
||||
Weight *int `json:"weight" yaml:"weight"`
|
||||
// Subset specifies a subset for the target Service. The subset should be pre-defined
|
||||
// in ApisixUpstream about this service.
|
||||
Subset string `json:"subset,omitempty" yaml:"subset,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteHTTPMatch represents the match condition for hitting this route.
|
||||
type ApisixRouteHTTPMatch struct {
|
||||
// URI path predicates, at least one path should be
|
||||
// configured, path could be exact or prefix, for prefix path,
|
||||
// append "*" after it, for instance, "/foo*".
|
||||
Paths []string `json:"paths" yaml:"paths"`
|
||||
// HTTP request method predicates.
|
||||
Methods []string `json:"methods,omitempty" yaml:"methods,omitempty"`
|
||||
// HTTP Host predicates, host can be a wildcard domain or
|
||||
// an exact domain. For wildcard domain, only one generic
|
||||
// level is allowed, for instance, "*.foo.com" is valid but
|
||||
// "*.*.foo.com" is not.
|
||||
Hosts []string `json:"hosts,omitempty" yaml:"hosts,omitempty"`
|
||||
// Remote address predicates, items can be valid IPv4 address
|
||||
// or IPv6 address or CIDR.
|
||||
RemoteAddrs []string `json:"remoteAddrs,omitempty" yaml:"remoteAddrs,omitempty"`
|
||||
// NginxVars represents generic match predicates,
|
||||
// it uses Nginx variable systems, so any predicate
|
||||
// like headers, querystring and etc can be leveraged
|
||||
// here to match the route.
|
||||
// For instance, it can be:
|
||||
// nginxVars:
|
||||
// - subject: "$remote_addr"
|
||||
// op: in
|
||||
// value:
|
||||
// - "127.0.0.1"
|
||||
// - "10.0.5.11"
|
||||
NginxVars []ApisixRouteHTTPMatchExpr `json:"exprs,omitempty" yaml:"exprs,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteHTTPMatchExpr represents a binary route match expression .
|
||||
type ApisixRouteHTTPMatchExpr struct {
|
||||
// Subject is the expression subject, it can
|
||||
// be any string composed by literals and nginx
|
||||
// vars.
|
||||
Subject ApisixRouteHTTPMatchExprSubject `json:"subject" yaml:"subject"`
|
||||
// Op is the operator.
|
||||
Op string `json:"op" yaml:"op"`
|
||||
// Set is an array type object of the expression.
|
||||
// It should be used when the Op is "in" or "not_in";
|
||||
Set []string `json:"set" yaml:"set"`
|
||||
// Value is the normal type object for the expression,
|
||||
// it should be used when the Op is not "in" and "not_in".
|
||||
// Set and Value are exclusive so only of them can be set
|
||||
// in the same time.
|
||||
Value *string `json:"value" yaml:"value"`
|
||||
}
|
||||
|
||||
// ApisixRouteHTTPMatchExprSubject describes the route match expression subject.
|
||||
type ApisixRouteHTTPMatchExprSubject struct {
|
||||
// The subject scope, can be:
|
||||
// ScopeQuery, ScopeHeader, ScopePath
|
||||
// when subject is ScopePath, Name field
|
||||
// will be ignored.
|
||||
Scope string `json:"scope" yaml:"scope"`
|
||||
// The name of subject.
|
||||
Name string `json:"name" yaml:"name"`
|
||||
}
|
||||
|
||||
// ApisixRoutePlugin represents an APISIX plugin.
|
||||
type ApisixRoutePlugin struct {
|
||||
// The plugin name.
|
||||
Name string `json:"name" yaml:"name"`
|
||||
// Whether this plugin is in use, default is true.
|
||||
Enable bool `json:"enable" yaml:"enable"`
|
||||
// Plugin configuration.
|
||||
Config ApisixRoutePluginConfig `json:"config" yaml:"config"`
|
||||
}
|
||||
|
||||
// ApisixRoutePluginConfig is the configuration for
|
||||
// any plugins.
|
||||
type ApisixRoutePluginConfig map[string]interface{}
|
||||
|
||||
// ApisixRouteAuthentication is the authentication-related
|
||||
// configuration in ApisixRoute.
|
||||
type ApisixRouteAuthentication struct {
|
||||
Enable bool `json:"enable" yaml:"enable"`
|
||||
Type string `json:"type" yaml:"type"`
|
||||
KeyAuth ApisixRouteAuthenticationKeyAuth `json:"keyAuth,omitempty" yaml:"keyAuth,omitempty"`
|
||||
JwtAuth ApisixRouteAuthenticationJwtAuth `json:"jwtAuth,omitempty" yaml:"jwtAuth,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteAuthenticationKeyAuth is the keyAuth-related
|
||||
// configuration in ApisixRouteAuthentication.
|
||||
type ApisixRouteAuthenticationKeyAuth struct {
|
||||
Header string `json:"header,omitempty" yaml:"header,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteAuthenticationJwtAuth is the jwt auth related
|
||||
// configuration in ApisixRouteAuthentication.
|
||||
type ApisixRouteAuthenticationJwtAuth struct {
|
||||
Header string `json:"header,omitempty" yaml:"header,omitempty"`
|
||||
Query string `json:"query,omitempty" yaml:"query,omitempty"`
|
||||
Cookie string `json:"cookie,omitempty" yaml:"cookie,omitempty"`
|
||||
}
|
||||
|
||||
func (p ApisixRoutePluginConfig) DeepCopyInto(out *ApisixRoutePluginConfig) {
|
||||
b, _ := json.Marshal(&p)
|
||||
_ = json.Unmarshal(b, out)
|
||||
}
|
||||
|
||||
func (p *ApisixRoutePluginConfig) DeepCopy() *ApisixRoutePluginConfig {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRoutePluginConfig)
|
||||
p.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// ApisixRouteStream is the configuration for level 4 route
|
||||
type ApisixRouteStream struct {
|
||||
// The rule name, cannot be empty.
|
||||
Name string `json:"name" yaml:"name"`
|
||||
Protocol string `json:"protocol" yaml:"protocol"`
|
||||
Match ApisixRouteStreamMatch `json:"match" yaml:"match"`
|
||||
Backend ApisixRouteStreamBackend `json:"backend" yaml:"backend"`
|
||||
Plugins []ApisixRoutePlugin `json:"plugins,omitempty" yaml:"plugins,omitempty"`
|
||||
}
|
||||
|
||||
// ApisixRouteStreamMatch represents the match conditions of stream route.
|
||||
type ApisixRouteStreamMatch struct {
|
||||
// IngressPort represents the port listening on the Ingress proxy server.
|
||||
// It should be pre-defined as APISIX doesn't support dynamic listening.
|
||||
IngressPort int32 `json:"ingressPort" yaml:"ingressPort"`
|
||||
}
|
||||
|
||||
// ApisixRouteStreamBackend represents a TCP backend (a Kubernetes Service).
|
||||
type ApisixRouteStreamBackend struct {
|
||||
// The name (short) of the service, note cross namespace is forbidden,
|
||||
// so be sure the ApisixRoute and Service are in the same namespace.
|
||||
ServiceName string `json:"serviceName" yaml:"serviceName"`
|
||||
// The service port, could be the name or the port number.
|
||||
ServicePort intstr.IntOrString `json:"servicePort" yaml:"servicePort"`
|
||||
// The resolve granularity, can be "endpoints" or "service",
|
||||
// when set to "endpoints", the pod ips will be used; other
|
||||
// wise, the service ClusterIP or ExternalIP will be used,
|
||||
// default is endpoints.
|
||||
ResolveGranularity string `json:"resolveGranularity,omitempty" yaml:"resolveGranularity,omitempty"`
|
||||
// Subset specifies a subset for the target Service. The subset should be pre-defined
|
||||
// in ApisixUpstream about this service.
|
||||
Subset string `json:"subset,omitempty" yaml:"subset,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ApisixRouteList contains a list of ApisixRoute.
|
||||
type ApisixRouteList struct {
|
||||
metav1.TypeMeta `json:",inline" yaml:",inline"`
|
||||
metav1.ListMeta `json:"metadata" yaml:"metadata"`
|
||||
Items []ApisixRoute `json:"items,omitempty" yaml:"items,omitempty"`
|
||||
}
|
||||
434
pkg/apis/apisix/v2/zz_generated.deepcopy.go
Normal file
434
pkg/apis/apisix/v2/zz_generated.deepcopy.go
Normal file
@@ -0,0 +1,434 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRoute) DeepCopyInto(out *ApisixRoute) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRoute.
|
||||
func (in *ApisixRoute) DeepCopy() *ApisixRoute {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRoute)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApisixRoute) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteAuthentication) DeepCopyInto(out *ApisixRouteAuthentication) {
|
||||
*out = *in
|
||||
out.KeyAuth = in.KeyAuth
|
||||
out.JwtAuth = in.JwtAuth
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteAuthentication.
|
||||
func (in *ApisixRouteAuthentication) DeepCopy() *ApisixRouteAuthentication {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteAuthentication)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteAuthenticationJwtAuth) DeepCopyInto(out *ApisixRouteAuthenticationJwtAuth) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteAuthenticationJwtAuth.
|
||||
func (in *ApisixRouteAuthenticationJwtAuth) DeepCopy() *ApisixRouteAuthenticationJwtAuth {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteAuthenticationJwtAuth)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteAuthenticationKeyAuth) DeepCopyInto(out *ApisixRouteAuthenticationKeyAuth) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteAuthenticationKeyAuth.
|
||||
func (in *ApisixRouteAuthenticationKeyAuth) DeepCopy() *ApisixRouteAuthenticationKeyAuth {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteAuthenticationKeyAuth)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteHTTP) DeepCopyInto(out *ApisixRouteHTTP) {
|
||||
*out = *in
|
||||
if in.Timeout != nil {
|
||||
in, out := &in.Timeout, &out.Timeout
|
||||
*out = new(UpstreamTimeout)
|
||||
**out = **in
|
||||
}
|
||||
in.Match.DeepCopyInto(&out.Match)
|
||||
if in.Backends != nil {
|
||||
in, out := &in.Backends, &out.Backends
|
||||
*out = make([]ApisixRouteHTTPBackend, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Plugins != nil {
|
||||
in, out := &in.Plugins, &out.Plugins
|
||||
*out = make([]ApisixRoutePlugin, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Authentication != nil {
|
||||
in, out := &in.Authentication, &out.Authentication
|
||||
*out = new(ApisixRouteAuthentication)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteHTTP.
|
||||
func (in *ApisixRouteHTTP) DeepCopy() *ApisixRouteHTTP {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteHTTP)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteHTTPBackend) DeepCopyInto(out *ApisixRouteHTTPBackend) {
|
||||
*out = *in
|
||||
out.ServicePort = in.ServicePort
|
||||
if in.Weight != nil {
|
||||
in, out := &in.Weight, &out.Weight
|
||||
*out = new(int)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteHTTPBackend.
|
||||
func (in *ApisixRouteHTTPBackend) DeepCopy() *ApisixRouteHTTPBackend {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteHTTPBackend)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteHTTPMatch) DeepCopyInto(out *ApisixRouteHTTPMatch) {
|
||||
*out = *in
|
||||
if in.Paths != nil {
|
||||
in, out := &in.Paths, &out.Paths
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Methods != nil {
|
||||
in, out := &in.Methods, &out.Methods
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Hosts != nil {
|
||||
in, out := &in.Hosts, &out.Hosts
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.RemoteAddrs != nil {
|
||||
in, out := &in.RemoteAddrs, &out.RemoteAddrs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NginxVars != nil {
|
||||
in, out := &in.NginxVars, &out.NginxVars
|
||||
*out = make([]ApisixRouteHTTPMatchExpr, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteHTTPMatch.
|
||||
func (in *ApisixRouteHTTPMatch) DeepCopy() *ApisixRouteHTTPMatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteHTTPMatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteHTTPMatchExpr) DeepCopyInto(out *ApisixRouteHTTPMatchExpr) {
|
||||
*out = *in
|
||||
out.Subject = in.Subject
|
||||
if in.Set != nil {
|
||||
in, out := &in.Set, &out.Set
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Value != nil {
|
||||
in, out := &in.Value, &out.Value
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteHTTPMatchExpr.
|
||||
func (in *ApisixRouteHTTPMatchExpr) DeepCopy() *ApisixRouteHTTPMatchExpr {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteHTTPMatchExpr)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteHTTPMatchExprSubject) DeepCopyInto(out *ApisixRouteHTTPMatchExprSubject) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteHTTPMatchExprSubject.
|
||||
func (in *ApisixRouteHTTPMatchExprSubject) DeepCopy() *ApisixRouteHTTPMatchExprSubject {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteHTTPMatchExprSubject)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteList) DeepCopyInto(out *ApisixRouteList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ApisixRoute, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteList.
|
||||
func (in *ApisixRouteList) DeepCopy() *ApisixRouteList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApisixRouteList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRoutePlugin) DeepCopyInto(out *ApisixRoutePlugin) {
|
||||
*out = *in
|
||||
in.Config.DeepCopyInto(&out.Config)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRoutePlugin.
|
||||
func (in *ApisixRoutePlugin) DeepCopy() *ApisixRoutePlugin {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRoutePlugin)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteSpec) DeepCopyInto(out *ApisixRouteSpec) {
|
||||
*out = *in
|
||||
if in.HTTP != nil {
|
||||
in, out := &in.HTTP, &out.HTTP
|
||||
*out = make([]ApisixRouteHTTP, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Stream != nil {
|
||||
in, out := &in.Stream, &out.Stream
|
||||
*out = make([]ApisixRouteStream, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteSpec.
|
||||
func (in *ApisixRouteSpec) DeepCopy() *ApisixRouteSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteStream) DeepCopyInto(out *ApisixRouteStream) {
|
||||
*out = *in
|
||||
out.Match = in.Match
|
||||
out.Backend = in.Backend
|
||||
if in.Plugins != nil {
|
||||
in, out := &in.Plugins, &out.Plugins
|
||||
*out = make([]ApisixRoutePlugin, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteStream.
|
||||
func (in *ApisixRouteStream) DeepCopy() *ApisixRouteStream {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteStream)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteStreamBackend) DeepCopyInto(out *ApisixRouteStreamBackend) {
|
||||
*out = *in
|
||||
out.ServicePort = in.ServicePort
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteStreamBackend.
|
||||
func (in *ApisixRouteStreamBackend) DeepCopy() *ApisixRouteStreamBackend {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteStreamBackend)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixRouteStreamMatch) DeepCopyInto(out *ApisixRouteStreamMatch) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixRouteStreamMatch.
|
||||
func (in *ApisixRouteStreamMatch) DeepCopy() *ApisixRouteStreamMatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixRouteStreamMatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApisixStatus) DeepCopyInto(out *ApisixStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApisixStatus.
|
||||
func (in *ApisixStatus) DeepCopy() *ApisixStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApisixStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UpstreamTimeout) DeepCopyInto(out *UpstreamTimeout) {
|
||||
*out = *in
|
||||
out.Connect = in.Connect
|
||||
out.Send = in.Send
|
||||
out.Read = in.Read
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamTimeout.
|
||||
func (in *UpstreamTimeout) DeepCopy() *UpstreamTimeout {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UpstreamTimeout)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -3,3 +3,7 @@ package appmesh
|
||||
const (
|
||||
GroupName = "appmesh.k8s.aws"
|
||||
)
|
||||
|
||||
const AccessLogAnnotation = "appmesh.flagger.app/accesslog"
|
||||
|
||||
const EnabledValue = "enabled"
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1alpha2"
|
||||
"github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1beta1"
|
||||
istiov1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
@@ -70,15 +70,19 @@ type CanarySpec struct {
|
||||
MetricsServer string `json:"metricsServer,omitempty"`
|
||||
|
||||
// TargetRef references a target resource
|
||||
TargetRef CrossNamespaceObjectReference `json:"targetRef"`
|
||||
TargetRef LocalObjectReference `json:"targetRef"`
|
||||
|
||||
// AutoscalerRef references an autoscaling resource
|
||||
// +optional
|
||||
AutoscalerRef *CrossNamespaceObjectReference `json:"autoscalerRef,omitempty"`
|
||||
AutoscalerRef *AutoscalerRefernce `json:"autoscalerRef,omitempty"`
|
||||
|
||||
// Reference to NGINX ingress resource
|
||||
// +optional
|
||||
IngressRef *CrossNamespaceObjectReference `json:"ingressRef,omitempty"`
|
||||
IngressRef *LocalObjectReference `json:"ingressRef,omitempty"`
|
||||
|
||||
// Reference to APISIX route resource
|
||||
// +optional
|
||||
RouteRef *LocalObjectReference `json:"routeRef,omitempty"`
|
||||
|
||||
// Reference to Gloo Upstream resource. Upstream config is copied from
|
||||
// the referenced upstream to the upstreams generated by flagger.
|
||||
@@ -128,6 +132,11 @@ type CanaryService struct {
|
||||
// +optional
|
||||
TargetPort intstr.IntOrString `json:"targetPort,omitempty"`
|
||||
|
||||
// AppProtocol of the service
|
||||
// https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol
|
||||
// +optional
|
||||
AppProtocol string `json:"appProtocol,omitempty"`
|
||||
|
||||
// PortDiscovery adds all container ports to the generated Kubernetes service
|
||||
PortDiscovery bool `json:"portDiscovery"`
|
||||
|
||||
@@ -143,7 +152,7 @@ type CanaryService struct {
|
||||
// Gateways that the HTTPRoute needs to attach itself to.
|
||||
// Must be specified while using the Gateway API as a provider.
|
||||
// +optional
|
||||
GatewayRefs []v1alpha2.ParentReference `json:"gatewayRefs,omitempty"`
|
||||
GatewayRefs []v1beta1.ParentReference `json:"gatewayRefs,omitempty"`
|
||||
|
||||
// Hosts attached to the generated Istio virtual service or Gateway API HTTPRoute.
|
||||
// Defaults to the service name
|
||||
@@ -257,6 +266,20 @@ type CanaryAnalysis struct {
|
||||
// A/B testing HTTP header match conditions
|
||||
// +optional
|
||||
Match []istiov1alpha3.HTTPMatchRequest `json:"match,omitempty"`
|
||||
|
||||
// SessionAffinity represents the session affinity settings for a canary run.
|
||||
// +optional
|
||||
SessionAffinity *SessionAffinity `json:"sessionAffinity,omitempty"`
|
||||
}
|
||||
|
||||
type SessionAffinity struct {
|
||||
// CookieName is the key that will be used for the session affinity cookie.
|
||||
CookieName string `json:"cookieName,omitempty"`
|
||||
// MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
// ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#attributes
|
||||
// The default value is 86,400 seconds, i.e. a day.
|
||||
// +optional
|
||||
MaxAge int `json:"maxAge,omitempty"`
|
||||
}
|
||||
|
||||
// CanaryMetric holds the reference to metrics used for canary analysis
|
||||
@@ -281,6 +304,10 @@ type CanaryMetric struct {
|
||||
// TemplateRef references a metric template object
|
||||
// +optional
|
||||
TemplateRef *CrossNamespaceObjectReference `json:"templateRef,omitempty"`
|
||||
|
||||
// TemplateVariables provides a map of key/value pairs that can be used to inject variables into a metric query.
|
||||
// +optional
|
||||
TemplateVariables map[string]string `json:"templateVariables,omitempty"`
|
||||
}
|
||||
|
||||
// CanaryThresholdRange defines the range used for metrics validation
|
||||
@@ -393,12 +420,67 @@ type CrossNamespaceObjectReference struct {
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
// LocalObjectReference contains enough information to let you locate the typed
|
||||
// referenced object in the same namespace.
|
||||
type LocalObjectReference struct {
|
||||
// API version of the referent
|
||||
// +optional
|
||||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
|
||||
// Kind of the referent
|
||||
// +optional
|
||||
Kind string `json:"kind,omitempty"`
|
||||
|
||||
// Name of the referent
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type AutoscalerRefernce struct {
|
||||
// API version of the scaler
|
||||
// +optional
|
||||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
|
||||
// Kind of the scaler
|
||||
// +optional
|
||||
Kind string `json:"kind,omitempty"`
|
||||
|
||||
// Name of the scaler
|
||||
Name string `json:"name"`
|
||||
|
||||
// PrimaryScalerQueries maps a unique id to a query for the primary
|
||||
// scaler, if a scaler supports scaling using queries.
|
||||
// +optional
|
||||
PrimaryScalerQueries map[string]string `json:"primaryScalerQueries"`
|
||||
|
||||
// PrimaryScalerReplicas defines overrides for the primary
|
||||
// autoscaler replicas.
|
||||
// +optional
|
||||
PrimaryScalerReplicas *ScalerReplicas `json:"primaryScalerReplicas,omitempty"`
|
||||
}
|
||||
|
||||
// ScalerReplicas holds overrides for autoscaler replicas
|
||||
type ScalerReplicas struct {
|
||||
// +optional
|
||||
MinReplicas *int32 `json:"minReplicas,omitempty"`
|
||||
// +optional
|
||||
MaxReplicas *int32 `json:"maxReplicas,omitempty"`
|
||||
}
|
||||
|
||||
// CustomMetadata holds labels and annotations to set on generated objects.
|
||||
type CustomMetadata struct {
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// GetMaxAge returns the max age of a cookie in seconds.
|
||||
func (s *SessionAffinity) GetMaxAge() int {
|
||||
if s.MaxAge == 0 {
|
||||
// 24 hours * 60 mins * 60 seconds
|
||||
return 86400
|
||||
}
|
||||
return s.MaxAge
|
||||
}
|
||||
|
||||
// GetServiceNames returns the apex, primary and canary Kubernetes service names
|
||||
func (c *Canary) GetServiceNames() (apexName, primaryName, canaryName string) {
|
||||
apexName = c.Spec.TargetRef.Name
|
||||
|
||||
@@ -82,12 +82,14 @@ type MetricTemplateProvider struct {
|
||||
|
||||
// MetricTemplateModel is the query template model
|
||||
type MetricTemplateModel struct {
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
Target string `json:"target"`
|
||||
Service string `json:"service"`
|
||||
Ingress string `json:"ingress"`
|
||||
Interval string `json:"interval"`
|
||||
Name string `json:"name"`
|
||||
Namespace string `json:"namespace"`
|
||||
Target string `json:"target"`
|
||||
Service string `json:"service"`
|
||||
Ingress string `json:"ingress"`
|
||||
Route string `json:"route"`
|
||||
Interval string `json:"interval"`
|
||||
Variables map[string]string `json:"variables"`
|
||||
}
|
||||
|
||||
// TemplateFunctions returns a map of functions, one for each model field
|
||||
@@ -98,7 +100,9 @@ func (mtm *MetricTemplateModel) TemplateFunctions() template.FuncMap {
|
||||
"target": func() string { return mtm.Target },
|
||||
"service": func() string { return mtm.Service },
|
||||
"ingress": func() string { return mtm.Ingress },
|
||||
"route": func() string { return mtm.Route },
|
||||
"interval": func() string { return mtm.Interval },
|
||||
"variables": func() map[string]string { return mtm.Variables },
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package v1beta1
|
||||
|
||||
const (
|
||||
ApisixProvider string = "apisix"
|
||||
AppMeshProvider string = "appmesh"
|
||||
LinkerdProvider string = "linkerd"
|
||||
IstioProvider string = "istio"
|
||||
|
||||
@@ -74,6 +74,10 @@ type CanaryStatus struct {
|
||||
CanaryWeight int `json:"canaryWeight"`
|
||||
Iterations int `json:"iterations"`
|
||||
// +optional
|
||||
PreviousSessionAffinityCookie string `json:"previousSessionAffinityCookie,omitempty"`
|
||||
// +optional
|
||||
SessionAffinityCookie string `json:"sessionAffinityCookie,omitempty"`
|
||||
// +optional
|
||||
TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
|
||||
// +optional
|
||||
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
|
||||
|
||||
@@ -22,7 +22,7 @@ limitations under the License.
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1alpha2 "github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1alpha2"
|
||||
gatewayapiv1beta1 "github.com/fluxcd/flagger/pkg/apis/gatewayapi/v1beta1"
|
||||
v1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -151,6 +151,34 @@ func (in *AlertProviderStatus) DeepCopy() *AlertProviderStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AutoscalerRefernce) DeepCopyInto(out *AutoscalerRefernce) {
|
||||
*out = *in
|
||||
if in.PrimaryScalerQueries != nil {
|
||||
in, out := &in.PrimaryScalerQueries, &out.PrimaryScalerQueries
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.PrimaryScalerReplicas != nil {
|
||||
in, out := &in.PrimaryScalerReplicas, &out.PrimaryScalerReplicas
|
||||
*out = new(ScalerReplicas)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalerRefernce.
|
||||
func (in *AutoscalerRefernce) DeepCopy() *AutoscalerRefernce {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AutoscalerRefernce)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Canary) DeepCopyInto(out *Canary) {
|
||||
*out = *in
|
||||
@@ -240,6 +268,11 @@ func (in *CanaryAnalysis) DeepCopyInto(out *CanaryAnalysis) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.SessionAffinity != nil {
|
||||
in, out := &in.SessionAffinity, &out.SessionAffinity
|
||||
*out = new(SessionAffinity)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -317,6 +350,13 @@ func (in *CanaryMetric) DeepCopyInto(out *CanaryMetric) {
|
||||
*out = new(CrossNamespaceObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.TemplateVariables != nil {
|
||||
in, out := &in.TemplateVariables, &out.TemplateVariables
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -341,7 +381,7 @@ func (in *CanaryService) DeepCopyInto(out *CanaryService) {
|
||||
}
|
||||
if in.GatewayRefs != nil {
|
||||
in, out := &in.GatewayRefs, &out.GatewayRefs
|
||||
*out = make([]v1alpha2.ParentReference, len(*in))
|
||||
*out = make([]gatewayapiv1beta1.ParentReference, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
@@ -422,12 +462,17 @@ func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
|
||||
out.TargetRef = in.TargetRef
|
||||
if in.AutoscalerRef != nil {
|
||||
in, out := &in.AutoscalerRef, &out.AutoscalerRef
|
||||
*out = new(CrossNamespaceObjectReference)
|
||||
**out = **in
|
||||
*out = new(AutoscalerRefernce)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.IngressRef != nil {
|
||||
in, out := &in.IngressRef, &out.IngressRef
|
||||
*out = new(CrossNamespaceObjectReference)
|
||||
*out = new(LocalObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.RouteRef != nil {
|
||||
in, out := &in.RouteRef, &out.RouteRef
|
||||
*out = new(LocalObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.UpstreamRef != nil {
|
||||
@@ -621,6 +666,22 @@ func (in *CustomMetadata) DeepCopy() *CustomMetadata {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
|
||||
func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LocalObjectReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricTemplate) DeepCopyInto(out *MetricTemplate) {
|
||||
*out = *in
|
||||
@@ -703,6 +764,13 @@ func (in *MetricTemplateList) DeepCopyObject() runtime.Object {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricTemplateModel) DeepCopyInto(out *MetricTemplateModel) {
|
||||
*out = *in
|
||||
if in.Variables != nil {
|
||||
in, out := &in.Variables, &out.Variables
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -776,3 +844,45 @@ func (in *MetricTemplateStatus) DeepCopy() *MetricTemplateStatus {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScalerReplicas) DeepCopyInto(out *ScalerReplicas) {
|
||||
*out = *in
|
||||
if in.MinReplicas != nil {
|
||||
in, out := &in.MinReplicas, &out.MinReplicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxReplicas != nil {
|
||||
in, out := &in.MaxReplicas, &out.MaxReplicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalerReplicas.
|
||||
func (in *ScalerReplicas) DeepCopy() *ScalerReplicas {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScalerReplicas)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SessionAffinity) DeepCopyInto(out *SessionAffinity) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinity.
|
||||
func (in *SessionAffinity) DeepCopy() *SessionAffinity {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SessionAffinity)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -284,9 +284,9 @@ const (
|
||||
//
|
||||
// Invalid values include:
|
||||
//
|
||||
// * ":method" - ":" is an invalid character. This means that HTTP/2 pseudo
|
||||
// headers are not currently supported by this type.
|
||||
// * "/invalid" - "/" is an invalid character
|
||||
// - ":method" - ":" is an invalid character. This means that HTTP/2 pseudo
|
||||
// headers are not currently supported by this type.
|
||||
// - "/invalid" - "/" is an invalid character
|
||||
//
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:MaxLength=256
|
||||
@@ -411,11 +411,13 @@ const (
|
||||
//
|
||||
// ```
|
||||
// match:
|
||||
// path:
|
||||
// value: "/foo"
|
||||
// headers:
|
||||
// - name: "version"
|
||||
// value "v1"
|
||||
//
|
||||
// path:
|
||||
// value: "/foo"
|
||||
// headers:
|
||||
// - name: "version"
|
||||
// value "v1"
|
||||
//
|
||||
// ```
|
||||
type HTTPRouteMatch struct {
|
||||
// Path specifies a HTTP request path matcher. If this field is not
|
||||
@@ -855,9 +857,9 @@ type HTTPRouteStatus struct {
|
||||
// Hostname is the fully qualified domain name of a network host. This matches
|
||||
// the RFC 1123 definition of a hostname with 2 notable exceptions:
|
||||
//
|
||||
// 1. IPs are not allowed.
|
||||
// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard
|
||||
// label must appear by itself as the first label.
|
||||
// 1. IPs are not allowed.
|
||||
// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard
|
||||
// label must appear by itself as the first label.
|
||||
//
|
||||
// Hostname can be "precise" which is a domain name without the terminating
|
||||
// dot of a network host (e.g. "foo.example.com") or "wildcard", which is a
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user