Compare commits
898 Commits
The compare view lists 898 commits (abbreviated SHAs 7d8c95c6b7 through 8137a25b13). Only the SHA column survived in this capture — the author, date, and message fields are empty — so the per-commit table is not reproduced here.
.clomonitor.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
exemptions:
- check: analytics
reason: "We don't track people"
@@ -1,11 +1,11 @@
root: ./docs/gitbook

redirects:
how-it-works: usage/how-it-works.md
usage/progressive-delivery: tutorials/istio-progressive-delivery.md
usage/ab-testing: tutorials/istio-ab-testing.md
usage/blue-green: tutorials/kubernetes-blue-green.md
usage/appmesh-progressive-delivery: tutorials/appmesh-progressive-delivery.md
usage/linkerd-progressive-delivery: tutorials/linkerd-progressive-delivery.md
usage/contour-progressive-delivery: tutorials/contour-progressive-delivery.md
usage/gloo-progressive-delivery: tutorials/gloo-progressive-delivery.md
@@ -13,3 +13,7 @@ redirects:
usage/skipper-progressive-delivery: tutorials/skipper-progressive-delivery.md
usage/crossover-progressive-delivery: tutorials/crossover-progressive-delivery.md
usage/traefik-progressive-delivery: tutorials/traefik-progressive-delivery.md
usage/kuma-progressive-delivery: tutorials/kuma-progressive-delivery.md
usage/gatewayapi-progressive-delivery: tutorials/gatewayapi-progressive-delivery.md
usage/apisix-progressive-delivery: tutorials/apisix-progressive-delivery.md
usage/knative-progressive-delivery: tutorials/knative-progressive-delivery.md
.github/CODEOWNERS (2 changed lines)
@@ -1 +1 @@
* @stefanprodan
* @stefanprodan @aryan9600
.github/dependabot.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
version: 2

updates:
- package-ecosystem: "github-actions"
directory: "/"
labels: ["area/ci", "dependencies"]
groups:
# Group all updates together, so that they are all applied in a single PR.
# Grouped updates are currently in beta and is subject to change.
# xref: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
ci:
patterns:
- "*"
schedule:
# By default, this will be on a monday.
interval: "weekly"
.github/workflows/build.yaml (33 changed lines)
@@ -9,29 +9,32 @@ on:
branches:
- main

permissions:
contents: read

jobs:
container:
runs-on: ubuntu-latest
build-flagger:
runs-on:
group: "Default Larger Runners"
labels: ubuntu-latest-16-cores
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Restore Go cache
uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
uses: actions/checkout@v5
- name: Setup Go
uses: actions/setup-go@v2
uses: actions/setup-go@v6
with:
go-version: 1.15.x
go-version: 1.25.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Download modules
run: |
go mod download
go install golang.org/x/tools/cmd/goimports
go install golang.org/x/tools/cmd/goimports@latest
- name: Run linters
run: make test-fmt test-codegen
run: make fmt test-codegen
- name: Verify CRDs
run: make verify-crd
- name: Run tests
run: go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
- name: Check if working tree is dirty
@@ -42,7 +45,7 @@ jobs:
exit 1
fi
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@v5
with:
file: ./coverage.txt
- name: Build container image
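The updated workflow boils down to a handful of commands that can also be run locally before pushing. The sketch below simply mirrors the steps shown in the diff (module download, goimports install, lint/codegen/CRD checks, race-enabled tests); nothing here is new to the workflow itself.

```sh
# Local reproduction of the CI steps from build.yaml
go mod download
go install golang.org/x/tools/cmd/goimports@latest
make fmt test-codegen   # formatting and generated-code checks
make verify-crd         # CRD verification
go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
```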
.github/workflows/e2e.yaml (34 changed lines)
@@ -9,28 +9,50 @@ on:
branches:
- main

permissions:
contents: read

jobs:
kind:
runs-on: ubuntu-latest
e2e-test:
runs-on:
group: "Default Larger Runners"
labels: ubuntu-latest-16-cores
strategy:
fail-fast: false
matrix:
provider:
# service mesh
- istio
- linkerd
- kuma
# ingress controllers
- contour
- nginx
- traefik
- gloo
- skipper
- kubernetes
- gatewayapi
- keda
- apisix
- knative
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v5
- name: Setup Kubernetes
uses: engineerd/setup-kind@v0.5.0
uses: helm/kind-action@v1.12.0
if: matrix.provider != 'skipper'
with:
version: "v0.11.0"
image: kindest/node:v1.21.1@sha256:fae9a58f17f18f06aeac9772ca8b5ac680ebbed985e266f711d936e91d113bad
version: v0.23.0
cluster_name: kind
node_image: kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e
- name: Setup Kubernetes for skipper
uses: helm/kind-action@v1.12.0
if: matrix.provider == 'skipper'
with:
version: v0.23.0
cluster_name: kind
node_image: kindest/node:v1.24.12@sha256:0bdca26bd7fe65c823640b14253ea7bac4baad9336b332c94850f84d8102f873
- name: Build container image
run: |
docker build -t test/flagger:latest .
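For reference, a single matrix entry can be approximated locally with the kind CLI. The node image digest is the one pinned in the workflow; the cluster name and the follow-up test invocation are illustrative assumptions.

```sh
# Rough local equivalent of one e2e matrix job (cluster name is illustrative)
kind create cluster --name kind \
  --image kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e
docker build -t test/flagger:latest .
kind load docker-image test/flagger:latest
# ...then run the provider-specific e2e scripts from the repository.
```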
.github/workflows/helm.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
name: helm

on:
workflow_dispatch:

permissions:
contents: read

jobs:
release-charts:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- uses: actions/checkout@v5
- name: Publish Helm charts
uses: stefanprodan/helm-gh-pages@v1.7.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_url: https://flagger.app
.github/workflows/push-ld.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
name: push-ld
on:
workflow_dispatch:

env:
IMAGE: "ghcr.io/fluxcd/flagger-loadtester"

permissions:
contents: read

jobs:
release-load-tester:
runs-on:
group: "Default Larger Runners"
permissions:
id-token: write
packages: write
steps:
- uses: actions/checkout@v5
- uses: sigstore/cosign-installer@v3.10.0
- name: Prepare
id: prep
run: |
VERSION=$(grep 'VERSION' cmd/loadtester/main.go | head -1 | awk '{ print $4 }' | tr -d '"')
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: fluxcdbot
password: ${{ secrets.GHCR_TOKEN }}
- name: Generate image meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.IMAGE }}
tags: |
type=raw,value=${{ steps.prep.outputs.VERSION }}
- name: Publish image
id: build-push
uses: docker/build-push-action@v6
with:
push: true
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile.loadtester
platforms: linux/amd64,linux/arm64
build-args: |
REVISION=${{ github.sha }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- name: Sign image
env:
COSIGN_EXPERIMENTAL: 1
run: |
cosign sign --yes ${{ env.IMAGE }}@${{ steps.build-push.outputs.digest }}
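The Prepare step derives the image tag by scraping the loadtester version constant. A quick sketch of that pipeline, using a hypothetical `var VERSION = "0.35.0"` line in place of cmd/loadtester/main.go:

```sh
# Hypothetical input line; the real one lives in cmd/loadtester/main.go
printf 'var VERSION = "0.35.0"\n' | head -1 | awk '{ print $4 }' | tr -d '"'
# prints: 0.35.0  (written to $GITHUB_OUTPUT as VERSION)
```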
.github/workflows/release.yml (155 changed lines)
@@ -3,38 +3,74 @@ on:
push:
tags:
- 'v*'
workflow_dispatch:
inputs:
tag:
description: 'image tag prefix'
default: 'rc'
required: true

permissions:
contents: read

env:
IMAGE: "ghcr.io/fluxcd/${{ github.event.repository.name }}"

jobs:
build-push:
runs-on: ubuntu-latest
release-flagger:
outputs:
hashes: ${{ steps.slsa.outputs.hashes }}
runs-on:
group: "Default Larger Runners"
permissions:
contents: write # needed to write releases
id-token: write # needed for keyless signing
packages: write # needed for ghcr access
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v5
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: 1.25.x
- uses: fluxcd/flux2/action@main
- uses: sigstore/cosign-installer@v3.10.0
- name: Prepare
id: prep
run: |
VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
if [[ ${GITHUB_EVENT_NAME} = "workflow_dispatch" ]]; then
VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}"
else
VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
fi
CHANGELOG="https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
echo ::set-output name=VERSION::${VERSION}
echo ::set-output name=CHANGELOG::${CHANGELOG}
echo "[CHANGELOG](${CHANGELOG})" > notes.md
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
- name: Setup QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
uses: docker/setup-qemu-action@v3
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
registry: ghcr.io
username: fluxcdbot
password: ${{ secrets.GHCR_TOKEN }}
- name: Publish image
uses: docker/build-push-action@v2
- name: Generate image meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.IMAGE }}
tags: |
type=raw,value=${{ steps.prep.outputs.VERSION }}
- name: Publish image
id: build-push
uses: docker/build-push-action@v6
with:
sbom: true
provenance: true
push: true
builder: ${{ steps.buildx.outputs.name }}
context: .
@@ -42,33 +78,76 @@ jobs:
platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: |
REVISON=${{ github.sha }}
tags: |
ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
labels: |
org.opencontainers.image.title=${{ github.event.repository.name }}
org.opencontainers.image.description=${{ github.event.repository.description }}
org.opencontainers.image.url=${{ github.event.repository.html_url }}
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.version=${{ steps.prep.outputs.VERSION }}
org.opencontainers.image.created=${{ steps.prep.outputs.BUILD_DATE }}
- name: Check images
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- name: Sign image
env:
COSIGN_EXPERIMENTAL: 1
run: |
docker buildx imagetools inspect ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
cosign sign --yes ${{ env.IMAGE }}@${{ steps.build-push.outputs.digest }}
- name: Publish signed manifests to GHCR
if: startsWith(github.ref, 'refs/tags/v')
env:
COSIGN_EXPERIMENTAL: 1
run: |
OCI_URL=$(flux push artifact \
oci://ghcr.io/fluxcd/flagger-manifests:${{ steps.prep.outputs.VERSION }} \
--path="./kustomize" \
--source="$(git config --get remote.origin.url)" \
--revision="${{ steps.prep.outputs.VERSION }}/$(git rev-parse HEAD)" \
--output json | \
jq -r '. | .repository + "@" + .digest')
cosign sign --yes ${OCI_URL}
- name: Publish Helm charts
uses: stefanprodan/helm-gh-pages@v1.3.0
if: startsWith(github.ref, 'refs/tags/v')
uses: stefanprodan/helm-gh-pages@v1.7.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_url: https://flagger.app
linting: off
- name: Create release
uses: actions/create-release@latest
- uses: fluxcd/pkg/actions/helm@main
with:
version: 3.12.3
- name: Publish signed Helm chart to GHCR
if: startsWith(github.ref, 'refs/tags/v')
env:
COSIGN_EXPERIMENTAL: 1
run: |
helm package charts/flagger
helm push flagger-${{ steps.prep.outputs.VERSION }}.tgz oci://ghcr.io/fluxcd/charts |& tee .digest
cosign sign --yes ghcr.io/fluxcd/charts/flagger@$(cat .digest | awk -F "[, ]+" '/Digest/{print $NF}')
rm flagger-${{ steps.prep.outputs.VERSION }}.tgz
rm .digest
- uses: anchore/sbom-action/download-syft@v0
- name: Create release and SBOM
id: run-goreleaser
uses: goreleaser/goreleaser-action@v6
if: startsWith(github.ref, 'refs/tags/v')
with:
version: latest
args: release --release-notes=notes.md --clean --skip=validate
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: ${{ github.ref }}
draft: false
prerelease: false
body: |
[CHANGELOG](${{ steps.prep.outputs.CHANGELOG }})
- name: Generate SLSA metadata
id: slsa
if: startsWith(github.ref, 'refs/tags/v')
env:
ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
run: |
set -euo pipefail

hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
echo "hashes=$hashes" >> $GITHUB_OUTPUT

release-provenance:
needs: [release-flagger]
if: startsWith(github.ref, 'refs/tags/v')
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
contents: write # for uploading attestations to GitHub releases.
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
provenance-name: "provenance.intoto.jsonl"
base64-subjects: "${{ needs.release-flagger.outputs.hashes }}"
upload-assets: true
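The Prepare step turns the release version into a CHANGELOG anchor by stripping the dots; a worked example with a hypothetical tag:

```sh
# Hypothetical version; on tag pushes it comes from pkg/version/version.go
VERSION=1.43.0
echo "https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
# prints: https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#1430
```

The `hashes` value produced by the SLSA step is effectively a base64-encoded list of `<sha256> <artifact>` pairs taken from the GoReleaser artifact metadata, which the slsa-github-generator workflow consumes through its `base64-subjects` input.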
.github/workflows/scan.yml (30 changed lines)
@@ -8,30 +8,38 @@ on:
schedule:
- cron: '18 10 * * 3'

permissions:
contents: read

jobs:
fossa:
name: FOSSA
scan-fossa:
runs-on: ubuntu-latest
permissions:
security-events: write
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v5
- name: Run FOSSA scan and upload build data
uses: fossa-contrib/fossa-action@v1
uses: fossa-contrib/fossa-action@v3
with:
# FOSSA Push-Only API Token
fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
github-token: ${{ github.token }}

codeql:
name: CodeQL
scan-codeql:
runs-on: ubuntu-latest
permissions:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v5
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: 1.25.x
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@v4
with:
languages: go
- name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@v4
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@v4
@@ -1,14 +1,31 @@
project_name: flagger

builds:
- main: ./cmd/flagger
binary: flagger
ldflags: -s -w -X github.com/fluxcd/flagger/pkg/version.REVISION={{.Commit}}
goos:
- linux
goarch:
- amd64
- skip: true

release:
prerelease: auto

source:
enabled: true
name_template: "{{ .ProjectName }}_{{ .Version }}_source_code"

sboms:
- id: source
artifacts: source
documents:
- "{{ .ProjectName }}_{{ .Version }}_sbom.spdx.json"

signs:
- cmd: cosign
env:
- CGO_ENABLED=0
archives:
- name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- none*
- COSIGN_EXPERIMENTAL=1
certificate: '${artifact}.pem'
args:
- sign-blob
- "--yes"
- '--output-certificate=${certificate}'
- '--output-signature=${signature}'
- '${artifact}'
artifacts: checksum
output: true
CHANGELOG.md (1213 changed lines; diff not shown)
Dockerfile (14 changed lines)
@@ -1,4 +1,11 @@
FROM golang:1.15-alpine as builder
ARG GO_VERSION=1.25
ARG XX_VERSION=1.6.1

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder

# copy build utilities
COPY --from=xx / /

ARG TARGETPLATFORM
ARG REVISON
@@ -17,11 +24,12 @@ COPY cmd/ cmd/
COPY pkg/ pkg/

# build
RUN CGO_ENABLED=0 go build \
ENV CGO_ENABLED=0
RUN xx-go build \
-ldflags "-s -w -X github.com/fluxcd/flagger/pkg/version.REVISION=${REVISON}" \
-a -o flagger ./cmd/flagger

FROM alpine:3.13
FROM alpine:3.22

RUN apk --no-cache add ca-certificates
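The switch to `tonistiigi/xx` and `xx-go` makes the image cross-compilable with Buildx. A hedged sketch of how such a build is typically invoked (tag and platforms are illustrative, matching the release workflow; the build arg keeps the `REVISON` spelling used by the Dockerfile):

```sh
# Illustrative multi-arch build; requires a Buildx builder and registry access
docker buildx build \
  --platform linux/amd64,linux/arm64,linux/arm/v7 \
  --build-arg REVISON=$(git rev-parse HEAD) \
  -t ghcr.io/fluxcd/flagger:dev \
  --push .
```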
@@ -1,59 +1,64 @@
FROM alpine:3.11 as build
FROM golang:1.25-alpine AS builder

RUN apk --no-cache add alpine-sdk perl curl
ARG TARGETPLATFORM
ARG TARGETARCH
ARG REVISION

RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
chmod +x hey && mv hey /usr/local/bin/hey
RUN apk --no-cache add alpine-sdk perl curl bash tar

RUN HELM2_VERSION=2.16.8 && \
curl -sSL "https://get.helm.sh/helm-v${HELM2_VERSION}-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller
RUN HELM3_VERSION=3.19.0 && \
curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-${TARGETARCH}.tar.gz" | tar xvz && \
chmod +x linux-${TARGETARCH}/helm && mv linux-${TARGETARCH}/helm /usr/local/bin/helm

RUN HELM3_VERSION=3.2.3 && \
curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-amd64.tar.gz" | tar xvz && \
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3
RUN KUBECTL_VERSION=v1.34.1 && \
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" && \
chmod +x kubectl && mv kubectl /usr/local/bin/kubectl

RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
RUN GRPC_HEALTH_PROBE_VERSION=v0.4.35 && \
wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-${TARGETARCH} && \
chmod +x /usr/local/bin/grpc_health_probe

RUN GHZ_VERSION=0.39.0 && \
curl -sSL "https://github.com/bojand/ghz/releases/download/v${GHZ_VERSION}/ghz_${GHZ_VERSION}_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz
RUN GHZ_VERSION=0.120.0 && \
curl -sSL "https://github.com/bojand/ghz/archive/refs/tags/v${GHZ_VERSION}.tar.gz" | tar xz -C /tmp && \
cd /tmp/ghz-${GHZ_VERSION}/cmd/ghz && GOARCH=$TARGETARCH go build . && mv ghz /usr/local/bin && \
chmod +x /usr/local/bin/ghz

RUN HELM_TILLER_VERSION=0.9.3 && \
curl -sSL "https://github.com/rimusz/helm-tiller/archive/v${HELM_TILLER_VERSION}.tar.gz" | tar xz -C /tmp && \
mv /tmp/helm-tiller-${HELM_TILLER_VERSION} /tmp/helm-tiller
WORKDIR /workspace

RUN WRK_VERSION=4.0.2 && \
cd /tmp && git clone -b ${WRK_VERSION} https://github.com/wg/wrk
RUN cd /tmp/wrk && make
# copy modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

FROM bash:5.0
# cache modules
RUN go mod download

# copy source code
COPY cmd/ cmd/
COPY pkg/ pkg/

# build
RUN CGO_ENABLED=0 go build -o loadtester ./cmd/loadtester/*

FROM bash:5.2

ARG TARGETPLATFORM

RUN addgroup -S app && \
adduser -S -g app app && \
apk --no-cache add ca-certificates curl jq libgcc
apk --no-cache add ca-certificates curl jq libgcc wrk hey git

WORKDIR /home/app

COPY --from=bats/bats:v1.1.0 /opt/bats/ /opt/bats/
COPY --from=bats/bats:1.11.1 /opt/bats/ /opt/bats/
RUN ln -s /opt/bats/bin/bats /usr/local/bin/

COPY --from=build /usr/local/bin/hey /usr/local/bin/
COPY --from=build /tmp/wrk/wrk /usr/local/bin/
COPY --from=build /usr/local/bin/helm /usr/local/bin/
COPY --from=build /usr/local/bin/tiller /usr/local/bin/
COPY --from=build /usr/local/bin/ghz /usr/local/bin/
COPY --from=build /usr/local/bin/helmv3 /usr/local/bin/
COPY --from=build /usr/local/bin/grpc_health_probe /usr/local/bin/
COPY --from=build /tmp/helm-tiller /tmp/helm-tiller
COPY --from=builder /usr/local/bin/helm /usr/local/bin/
COPY --from=builder /usr/local/bin/ghz /usr/local/bin/
COPY --from=builder /usr/local/bin/grpc_health_probe /usr/local/bin/
COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/

ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto

COPY ./bin/loadtester .

RUN chown -R app:app ./
RUN chown -R app:app /tmp/ghz

@@ -63,7 +68,6 @@ USER app
RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
RUN wrk -d 1s -c 1 -t 1 https://flagger.app > /dev/null && echo $? | grep 0

# install Helm v2 plugins
RUN helm init --client-only && helm plugin install /tmp/helm-tiller
COPY --from=builder --chown=app:app /workspace/loadtester .

ENTRYPOINT ["./loadtester"]
GOVERNANCE.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Flagger Governance

The Flagger project is governed by the [Flux governance document](https://github.com/fluxcd/community/blob/main/GOVERNANCE.md),
involvement is defined in the [Flux community roles document](chttps://github.com/fluxcd/community/blob/main/community-roles.md),
and processes can be found in the [Flux process document](https://github.com/fluxcd/community/blob/main/PROCESS.md).
@@ -2,5 +2,8 @@ The maintainers are generally available in Slack at
https://cloud-native.slack.com/messages/flagger/ (obtain an invitation
at https://slack.cncf.io/).

Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)
Takeshi Yoneda, DMM.com <cz.rk.t0415y.g@gmail.com> (Slack: @mathetake Twitter: @mathetake)
In alphabetical order:

Sanskar Jaiswal, Independent <jaiswalsanskar078@gmail.com> (github: @aryan9600, slack: aryan9600)
Stefan Prodan, ControlPlane <stefan.prodan@gmail.com> (github: @stefanprodan, slack: stefanprodan)
Takeshi Yoneda, Tetrate <takeshi@tetrate.io> (github: @mathetake, slack: mathetake)
Makefile (36 changed lines)
@@ -5,13 +5,14 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }'
build:
CGO_ENABLED=0 go build -a -o ./bin/flagger ./cmd/flagger

fmt:
gofmt -l -s -w ./
goimports -l -w ./
tidy:
rm -f go.sum; go mod tidy -compat=1.25

test-fmt:
gofmt -l -s ./ | grep ".*\.go"; if [ "$$?" = "0" ]; then exit 1; fi
goimports -l ./ | grep ".*\.go"; if [ "$$?" = "0" ]; then exit 1; fi
vet:
go vet ./...

fmt:
go fmt ./...

codegen:
./hack/update-codegen.sh
@@ -19,22 +20,30 @@ codegen:
test-codegen:
./hack/verify-codegen.sh

test: test-fmt test-codegen
test: fmt test-codegen
go test ./...

test-coverage: fmt test-codegen
go test -coverprofile cover.out ./...
go tool cover -html=cover.out
rm cover.out

crd:
cat artifacts/flagger/crd.yaml > charts/flagger/crds/crd.yaml
cat artifacts/flagger/crd.yaml > kustomize/base/flagger/crd.yaml

verify-crd:
./hack/verify-crd.sh

version-set:
@next="$(TAG)" && \
current="$(VERSION)" && \
sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
sed -i '' "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
sed -i '' "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
sed -i "s/$$current/$$next/g" pkg/version/version.go && \
sed -i "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
sed -i "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
sed -i "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
sed -i "s/version: $$current/version: $$next/g" charts/flagger/Chart.yaml && \
sed -i "s/newTag: $$current/newTag: $$next/g" kustomize/base/flagger/kustomization.yaml && \
echo "Version $$next set in code, deployment, chart and kustomize"

release:
@@ -42,7 +51,6 @@ release:
git push origin "v$(VERSION)"

loadtester-build:
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./bin/loadtester ./cmd/loadtester/*
docker build -t ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester

loadtester-push:
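Taken together, the targets above suggest a release flow along these lines. The tag value is hypothetical, and `VERSION` is assumed to be derived from pkg/version/version.go in the Makefile preamble (not shown in this hunk):

```sh
make version-set TAG=1.43.0   # rewrite version.go, deployment, chart and kustomize manifests
make crd                      # sync the CRD into the chart and kustomize base
make test                     # fmt + codegen checks, then go test ./...
make release                  # pushes the v$(VERSION) tag (see the release target above)
```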
README.md (172 changed lines)
@@ -1,10 +1,11 @@
# flagger
# Flagger

[](https://bestpractices.coreinfrastructure.org/projects/4783)
[](https://github.com/fluxcd/flagger/actions)
[](https://goreportcard.com/report/github.com/fluxcd/flagger)
[](https://github.com/fluxcd/flagger/blob/main/LICENSE)
[](https://github.com/fluxcd/flagger/releases)
[](https://bestpractices.coreinfrastructure.org/projects/4783)
[](https://goreportcard.com/report/github.com/fluxcd/flagger)
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Ffluxcd%2Fflagger?ref=badge_shield)
[](https://artifacthub.io/packages/search?repo=flagger)
[](https://clomonitor.io/projects/cncf/flagger)

Flagger is a progressive delivery tool that automates the release process for applications running on Kubernetes.
It reduces the risk of introducing a new software version in production
@@ -13,52 +14,40 @@ by gradually shifting traffic to the new version while measuring metrics and run

Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
using a service mesh (App Mesh, Istio, Linkerd)
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
For release analysis, Flagger can query Prometheus, Datadog, New Relic or CloudWatch
and for alerting it uses Slack, MS Teams, Discord and Rocket.
and integrates with various Kubernetes ingress controllers, service mesh, and monitoring solutions.

Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
and part of [Flux](https://fluxcd.io) family of GitOps tools.
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) graduated project
and part of the [Flux](https://fluxcd.io) family of GitOps tools.

### Documentation

Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app).
The Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app/main).

* Install
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
* [Flagger Install with Flux](https://docs.flagger.app/main/install/flagger-install-with-flux)
* Usage
* [How it works](https://docs.flagger.app/usage/how-it-works)
* [Deployment strategies](https://docs.flagger.app/usage/deployment-strategies)
* [Metrics analysis](https://docs.flagger.app/usage/metrics)
* [Webhooks](https://docs.flagger.app/usage/webhooks)
* [Alerting](https://docs.flagger.app/usage/alerting)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* Tutorials
* [App Mesh](https://docs.flagger.app/tutorials/appmesh-progressive-delivery)
* [Istio](https://docs.flagger.app/tutorials/istio-progressive-delivery)
* [Linkerd](https://docs.flagger.app/tutorials/linkerd-progressive-delivery)
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
* [NGINX Ingress](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
* [Kubernetes Blue/Green](https://docs.flagger.app/tutorials/kubernetes-blue-green)
* [How it works](https://docs.flagger.app/main/usage/how-it-works)
* [Deployment strategies](https://docs.flagger.app/main/usage/deployment-strategies)
* [Metrics analysis](https://docs.flagger.app/main/usage/metrics)
* [Webhooks](https://docs.flagger.app/main/usage/webhooks)
* [Alerting](https://docs.flagger.app/main/usage/alerting)
* [Monitoring](https://docs.flagger.app/main/usage/monitoring)

### Who is using Flagger
### Adopters

**Our list of production users has moved to <https://fluxcd.io/adopters/#flagger>**.
The list of production users can be found at [fluxcd.io/adopters/#flagger](https://fluxcd.io/adopters/#flagger).

If you are using Flagger, please [submit a PR to add your organization](https://github.com/fluxcd/website/tree/main/adopters#readme) to the list!
If you are using Flagger, please
[submit a PR to add your organization](https://github.com/fluxcd/website/blob/main/data/adopters/2-flagger.yaml) to the list!

### Canary CRD

Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, service mesh or ingress routes).
then creates a series of objects (Kubernetes deployments, ClusterIP services, service mesh, or ingress routes).
These objects expose the application on the mesh and drive the canary analysis and promotion.

Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronized.

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

@@ -70,8 +59,8 @@ metadata:
namespace: test
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, contour, gloo, supergloo, traefik
# for SMI TrafficSplit can be: smi:v1alpha1, smi:v1alpha2, smi:v1alpha3
# can be: kubernetes, istio, linkerd, kuma, knative, nginx, contour, gloo, traefik, skipper
# for Gateway API implementations: gatewayapi:v1 and gatewayapi:v1beta1
provider: istio
# deployment reference
targetRef:
@@ -83,7 +72,7 @@ spec:
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -182,72 +171,93 @@ For more details on how the canary analysis and promotion works please [read the

**Service Mesh**

| Feature | App Mesh | Istio | Linkerd | SMI | Kubernetes CNI |
| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Feature | Istio | Linkerd | Kuma | Knative | Kubernetes CNI |
|--------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

For SMI compatible service mesh solutions like Open Service Mesh, Consul Connect or Nginx Service Mesh,
[Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus) can be used to implement
the request success rate and request duration checks.
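As a sketch of what such a template looks like (not part of this diff; the name, namespace, Prometheus address and query are placeholders to adapt to your own metrics):

```sh
# Minimal MetricTemplate sketch, applied with kubectl
kubectl apply -f - <<'EOF'
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: success-rate
  namespace: flagger-system
spec:
  provider:
    type: prometheus
    address: http://prometheus.monitoring:9090
  query: |
    sum(rate(http_requests_total{status!~"5.."}[1m]))
    /
    sum(rate(http_requests_total[1m])) * 100
EOF
```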
**Ingress**

| Feature                                    | Contour            | Gloo               | NGINX              | Skipper            | Traefik            |
| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Canary deployments (weighted traffic)      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing)  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch)    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume)       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric)     | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom metric checks                       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

| Feature                                    | Contour            | Gloo               | NGINX              | Skipper            | Traefik            | Apache APISIX      |
|--------------------------------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
| Canary deployments (weighted traffic)      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing)  | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch)    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume)       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric)     | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Custom metric checks                       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |

**Networking Interface**

| Feature                                    | Gateway API        | SMI                |
|--------------------------------------------|--------------------|--------------------|
| Canary deployments (weighted traffic)      | :heavy_check_mark: | :heavy_check_mark: |
| Canary deployments with session affinity   | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing)  | :heavy_check_mark: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch)    | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing)         | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume)       | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric)     | :heavy_minus_sign: | :heavy_minus_sign: |
| Request duration check (L7 metric)         | :heavy_minus_sign: | :heavy_minus_sign: |
| Custom metric checks                       | :heavy_check_mark: | :heavy_check_mark: |

For all the [Gateway API](https://gateway-api.sigs.k8s.io/) compatible ingress controllers and service meshes,
the [Prometheus MetricTemplates](https://docs.flagger.app/usage/metrics#prometheus)
can be used to implement the request success rate and request duration checks.
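
As a minimal sketch (not taken from this repository), a Gateway API canary with session affinity could be declared roughly as follows. The `gatewayapi:v1` provider string, the parent gateway name/namespace and the port are assumptions to adapt to your cluster:

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # assumed provider value for the Gateway API integration
  provider: gatewayapi:v1
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
    # parent Gateway that will own the generated HTTPRoute (assumed name/namespace)
    gatewayRefs:
      - name: gateway
        namespace: istio-ingress
  analysis:
    interval: 1m
    threshold: 5
    maxWeight: 50
    stepWeight: 10
    # keeps a user pinned to the canary once they have been routed to it
    sessionAffinity:
      cookieName: flagger-cookie
      maxAge: 86400
```

The `gatewayRefs` and `sessionAffinity` fields correspond to the CRD additions shown further down in this diff.
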
### Roadmap

#### [GitOps Toolkit](https://github.com/fluxcd/flux2) compatibility
#### [GitOps Toolkit](https://fluxcd.io/flux/components/) compatibility

- Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
- Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
- Make Flagger emit Kubernetes events compatible with Flux v2 notification API
- Integrate Flagger into Flux v2 as the progressive delivery component

#### Integrations

- Add support for Kubernetes [Ingress v2](https://github.com/kubernetes-sigs/service-apis)
- Add support for ingress controllers like HAProxy and ALB
- Add support for metrics providers like InfluxDB, Stackdriver, SignalFX
- Migrate Linkerd, Kuma and other service mesh integrations to Gateway API

### Contributing

Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
To start contributing please read the [development guide](https://docs.flagger.app/dev/dev-guide).

When submitting bug reports please include as many details as possible:

- which Flagger version
- which Flagger CRD version
- which Kubernetes version
- what configuration (canary, ingress and workloads definitions)
- what happened (Flagger and Proxy logs)

### Getting Help
### Communication

If you have any questions about Flagger and progressive delivery:
Here is a list of good entry points into our community, how we stay in touch and how you can meet us as a team.

* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [CNCF community slack](https://slack.cncf.io/)
  and join the [#flagger](https://cloud-native.slack.com/messages/flagger/) channel.
* Check out the [Flux talks section](https://fluxcd.io/community/#talks) to see a list of online talks,
  hands-on training and meetups.
* File an [issue](https://github.com/fluxcd/flagger/issues/new).
- Slack: Join in and talk to us in the `#flagger` channel on [CNCF Slack](https://slack.cncf.io/).
- Public meetings: We run weekly meetings - join one of the upcoming dev meetings from the [Flux calendar](https://fluxcd.io/#calendar).
- Blog: Stay up to date with the latest news on [the Flux blog](https://fluxcd.io/blog/).
- Mailing list: To be updated on Flux and Flagger progress regularly, please [join the flux-dev mailing list](https://lists.cncf.io/g/cncf-flux-dev).

Your feedback is always welcome!

#### Subscribing to the flux-dev calendar

To add the meetings to your calendar (e.g. Google Calendar):

1. visit the [Flux calendar](https://lists.cncf.io/g/cncf-flux-dev/calendar)
2. click on "Subscribe to Calendar" at the very bottom of the page
3. copy the iCalendar URL
4. open your calendar application (e.g. Google Calendar)
5. find the "add calendar" option
6. choose "add by URL"
7. paste the iCalendar URL (ends with `.ics`)
8. done

@@ -1,62 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: appmesh
  progressDeadlineSeconds: 600
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    port: 80
    targetPort: 9898
    meshName: global
    retries:
      attempts: 3
      perTryTimeout: 5s
      retryOn: "gateway-error,client-error,stream-error"
    timeout: 35s
    match:
      - uri:
          prefix: /
    rewrite:
      uri: /
  analysis:
    interval: 15s
    threshold: 10
    iterations: 10
    match:
      - headers:
          x-canary:
            exact: "insider"
    metrics:
      - name: request-success-rate
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        thresholdRange:
          max: 500
        interval: 30s
    webhooks:
      - name: conformance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: "bash"
          cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 -H 'X-Canary: insider' http://podinfo-canary.test/"
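
With the A/B testing example above (removed in this change set), only requests that carry the `x-canary: insider` header are routed to the canary during the analysis. A quick manual check could look like this; the public hostname is an assumption:

```console
# routed to the canary while the analysis is running
$ curl -H 'X-Canary: insider' http://app.example.com/

# routed to the primary
$ curl http://app.example.com/
```
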
@@ -1,59 +0,0 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: appmesh
  progressDeadlineSeconds: 600
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    port: 80
    targetPort: http
    meshName: global
    retries:
      attempts: 3
      perTryTimeout: 5s
      retryOn: "gateway-error,client-error,stream-error"
    timeout: 35s
    match:
      - uri:
          prefix: /
    rewrite:
      uri: /
  analysis:
    interval: 15s
    threshold: 10
    maxWeight: 50
    stepWeight: 5
    metrics:
      - name: request-success-rate
        thresholdRange:
          min: 99
        interval: 1m
      - name: request-duration
        thresholdRange:
          max: 500
        interval: 30s
    webhooks:
      - name: conformance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 15s
        metadata:
          type: "bash"
          cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
@@ -10,7 +10,7 @@ spec:
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
@@ -20,7 +20,7 @@ spec:
    portName: http
    portDiscovery: true
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    - istio-system/public-gateway
    - mesh
    hosts:
    - app.example.com

@@ -11,7 +11,7 @@ spec:
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
@@ -21,7 +21,7 @@ spec:
    portName: http
    portDiscovery: true
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    - istio-system/public-gateway
    - mesh
    hosts:
    - app.example.com
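
The hunks above switch the gateway reference from the fully qualified host form to the `namespace/name` form. The referenced resource is a standard Istio Gateway, roughly like this sketch (the selector label and the plain-HTTP server are assumptions):

```yaml
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    # assumed label of the Istio ingress gateway deployment
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
```
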
artifacts/examples/kuma-canary.yaml (new file, 50 lines)
@@ -0,0 +1,50 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
  annotations:
    kuma.io/mesh: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    targetPort: 9898
    apex:
      annotations:
        9898.service.kuma.io/protocol: "http"
    canary:
      annotations:
        9898.service.kuma.io/protocol: "http"
    primary:
      annotations:
        9898.service.kuma.io/protocol: "http"
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeight: 10
    metrics:
      - name: request-success-rate
        threshold: 99
        interval: 1m
      - name: request-duration
        threshold: 500
        interval: 30s
    webhooks:
      - name: acceptance-test
        type: pre-rollout
        url: http://flagger-loadtester.test/
        timeout: 30s
        metadata:
          type: bash
          cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
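
Assuming Kuma and the Flagger load tester are already installed in the cluster, the new example can be exercised with standard kubectl commands (a sketch, not part of the diff):

```console
$ kubectl apply -f artifacts/examples/kuma-canary.yaml

# follow the analysis; the Status and Weight columns come from the Canary CRD printer columns
$ kubectl -n test get canary podinfo --watch
```
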
@@ -11,7 +11,7 @@ spec:
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:

@@ -11,7 +11,7 @@ spec:
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
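
These hunks, like the earlier ones, move the `autoscalerRef` from the deprecated `autoscaling/v2beta1` API to `autoscaling/v2`. The referenced HPA itself would then be declared against the stable API, roughly as in this sketch (replica counts and the CPU target are assumptions):

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 99
```
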
@@ -31,6 +31,18 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
@@ -78,6 +90,7 @@ rules:
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
- canaries/finalizers
|
||||
- metrictemplates
|
||||
- metrictemplates/status
|
||||
- alertproviders
|
||||
@@ -187,6 +200,57 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- kuma.io
|
||||
resources:
|
||||
- trafficroutes
|
||||
- trafficroutes/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
- httproutes
|
||||
- httproutes/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- keda.sh
|
||||
resources:
|
||||
- scaledobjects
|
||||
- scaledobjects/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apisix.apache.org
|
||||
resources:
|
||||
- apisixroutes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
|
||||
@@ -27,6 +27,10 @@ spec:
|
||||
- name: Weight
|
||||
type: string
|
||||
jsonPath: .status.canaryWeight
|
||||
- name: Suspended
|
||||
type: boolean
|
||||
jsonPath: .spec.suspend
|
||||
priority: 1
|
||||
- name: FailedChecks
|
||||
type: string
|
||||
jsonPath: .status.failedChecks
|
||||
@@ -76,7 +80,6 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- targetRef
|
||||
- service
|
||||
- analysis
|
||||
properties:
|
||||
provider:
|
||||
@@ -104,7 +107,7 @@ spec:
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
description: Scaler selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
@@ -114,8 +117,22 @@ spec:
|
||||
type: string
|
||||
enum:
|
||||
- HorizontalPodAutoscaler
|
||||
- ScaledObject
|
||||
name:
|
||||
type: string
|
||||
primaryScalerQueries:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
primaryScalerReplicas:
|
||||
type: object
|
||||
properties:
|
||||
minReplicas:
|
||||
type: integer
|
||||
minimum: 1
|
||||
maxReplicas:
|
||||
type: integer
|
||||
minimum: 1
|
||||
ingressRef:
|
||||
description: Ingress selector
|
||||
type: object
|
||||
@@ -129,6 +146,19 @@ spec:
|
||||
- Ingress
|
||||
name:
|
||||
type: string
|
||||
routeRef:
|
||||
description: APISIX route selector
|
||||
type: object
|
||||
required: [ "apiVersion", "kind", "name" ]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- ApisixRoute
|
||||
name:
|
||||
type: string
|
||||
upstreamRef:
|
||||
description: Gloo Upstream selector
|
||||
type: object
|
||||
@@ -158,11 +188,24 @@ spec:
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
appProtocol:
|
||||
description: Application protocol of the port
|
||||
type: string
|
||||
trafficDistribution:
|
||||
description: Traffic distribution of the service
|
||||
type: string
|
||||
enum:
|
||||
- PreferClose
|
||||
- PreferSameZone
|
||||
- PreferSameNode
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
x-kubernetes-int-or-string: true
|
||||
portDiscovery:
|
||||
description: Enable port dicovery
|
||||
description: Enable port discovery
|
||||
type: boolean
|
||||
headless:
|
||||
description: Headless if set to true, generates headless Kubernetes services.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: HTTP or gRPC request timeout
|
||||
@@ -450,6 +493,54 @@ spec:
|
||||
uri:
|
||||
format: string
|
||||
type: string
|
||||
authority:
|
||||
format: string
|
||||
type: string
|
||||
type:
|
||||
format: string
|
||||
type: string
|
||||
mirror:
|
||||
description: Mirror defines a schema for a filter that mirrors requests.
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
backendRef:
|
||||
properties:
|
||||
group:
|
||||
default: ""
|
||||
maxLength: 253
|
||||
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
kind:
|
||||
default: Service
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
|
||||
type: string
|
||||
name:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
namespace:
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
|
||||
type: string
|
||||
port:
|
||||
format: int32
|
||||
maximum: 65535
|
||||
minimum: 1
|
||||
type: integer
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Must have port for Service reference
|
||||
rule: '(size(self.group) == 0 && self.kind == ''Service'')
|
||||
? has(self.port) : true'
|
||||
required:
|
||||
- backendRef
|
||||
headers:
|
||||
description: Headers operations
|
||||
type: object
|
||||
@@ -495,6 +586,45 @@ spec:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
gatewayRefs:
|
||||
description: The list of parent Gateways for a HTTPRoute
|
||||
maxItems: 32
|
||||
type: array
|
||||
items:
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
properties:
|
||||
group:
|
||||
default: gateway.networking.k8s.io
|
||||
maxLength: 253
|
||||
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
kind:
|
||||
default: Gateway
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
|
||||
type: string
|
||||
name:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
namespace:
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
|
||||
type: string
|
||||
sectionName:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
port:
|
||||
format: int32
|
||||
maximum: 65535
|
||||
minimum: 1
|
||||
type: integer
|
||||
corsPolicy:
|
||||
description: Istio Cross-Origin Resource Sharing policy (CORS)
|
||||
type: object
|
||||
@@ -685,6 +815,10 @@ spec:
|
||||
- LEAST_CONN
|
||||
- RANDOM
|
||||
- PASSTHROUGH
|
||||
- LEAST_REQUEST
|
||||
type: string
|
||||
warmupDurationSecs:
|
||||
description: Represents the warmup duration of Service.
|
||||
type: string
|
||||
outlierDetection:
|
||||
description: Settings controlling eviction of unhealthy hosts from the load balancing pool.
|
||||
@@ -782,12 +916,27 @@ spec:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
unmanagedMetadata:
|
||||
description: UnmanagedMetadata is a list of metadata keys that should be ignored by Flagger.
|
||||
type: object
|
||||
properties:
|
||||
annotations:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
labels:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
skipAnalysis:
|
||||
description: Skip analysis and promote canary
|
||||
type: boolean
|
||||
revertOnDeletion:
|
||||
description: Revert mutated resources to original spec on deletion
|
||||
type: boolean
|
||||
suspend:
|
||||
description: Suspend Canary disabling/pausing all canary runs
|
||||
type: boolean
|
||||
analysis:
|
||||
description: Canary analysis for this canary
|
||||
type: object
|
||||
@@ -826,6 +975,12 @@ spec:
|
||||
mirrorWeight:
|
||||
description: Weight of traffic to be mirrored
|
||||
type: number
|
||||
primaryReadyThreshold:
|
||||
description: Percentage of pods that need to be available to consider primary as ready
|
||||
type: number
|
||||
canaryReadyThreshold:
|
||||
description: Percentage of pods that need to be available to consider canary as ready
|
||||
type: number
|
||||
match:
|
||||
description: A/B testing match conditions
|
||||
type: array
|
||||
@@ -855,6 +1010,34 @@ spec:
|
||||
description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax)
|
||||
format: string
|
||||
type: string
|
||||
queryParams:
|
||||
description: Query parameters for matching.
|
||||
type: object
|
||||
additionalProperties:
|
||||
oneOf:
|
||||
- not:
|
||||
anyOf:
|
||||
- required:
|
||||
- exact
|
||||
- required:
|
||||
- prefix
|
||||
- required:
|
||||
- regex
|
||||
- required:
|
||||
- exact
|
||||
- required:
|
||||
- prefix
|
||||
- required:
|
||||
- regex
|
||||
properties:
|
||||
exact:
|
||||
type: string
|
||||
prefix:
|
||||
type: string
|
||||
regex:
|
||||
description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax).
|
||||
type: string
|
||||
type: object
|
||||
sourceLabels:
|
||||
description: Applicable only when the 'mesh' gateway is included in the service.gateways list
|
||||
type: object
|
||||
@@ -902,6 +1085,11 @@ spec:
|
||||
namespace:
|
||||
description: Namespace of this metric template
|
||||
type: string
|
||||
templateVariables:
|
||||
description: Additional variables to be used in the metrics query (key-value pairs)
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
alerts:
|
||||
description: Alert list for this canary analysis
|
||||
type: array
|
||||
@@ -967,11 +1155,54 @@ spec:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
retries:
|
||||
description: Number of retries for this webhook
|
||||
type: number
|
||||
disableTLS:
|
||||
description: Disable TLS verification for this webhook
|
||||
type: boolean
|
||||
metadata:
|
||||
description: Metadata (key-value pairs) for this webhook
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
sessionAffinity:
|
||||
description: SessionAffinity represents the session affinity settings for a canary run.
|
||||
type: object
|
||||
required: [ "cookieName" ]
|
||||
properties:
|
||||
cookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
primaryCookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
domain:
|
||||
description: Domain defines the host to which the cookie will be sent.
|
||||
type: string
|
||||
httpOnly:
|
||||
description: HttpOnly forbids JavaScript from accessing the cookie, for example, through the Document.cookie property.
|
||||
type: boolean
|
||||
maxAge:
|
||||
description: MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
default: 86400
|
||||
type: number
|
||||
partitioned:
|
||||
description: Partitioned indicates that the cookie should be stored using partitioned storage.
|
||||
type: boolean
|
||||
path:
|
||||
description: Path indicates the path that must exist in the requested URL for the browser to send the Cookie header.
|
||||
type: string
|
||||
sameSite:
|
||||
description: SameSite controls whether or not a cookie is sent with cross-site requests.
|
||||
type: string
|
||||
enum:
|
||||
- Strict
|
||||
- Lax
|
||||
- None
|
||||
secure:
|
||||
description: "Secure indicates that the cookie is sent to the server only when a request is made with the https: scheme (except on localhost)"
|
||||
type: boolean
|
||||
status:
|
||||
description: CanaryStatus defines the observed state of a canary.
|
||||
type: object
|
||||
@@ -992,27 +1223,36 @@ spec:
|
||||
- Failed
|
||||
- Terminating
|
||||
- Terminated
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
trackedConfigs:
|
||||
description: TrackedConfig of this canary
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastPromotedSpec:
|
||||
description: LastPromotedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
sessionAffinityCookie:
|
||||
description: Session affinity cookie of the current canary run
|
||||
type: string
|
||||
previousSessionAffinityCookie:
|
||||
description: Session affinity cookie of the previous canary run
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
@@ -1104,12 +1344,23 @@ spec:
|
||||
- prometheus
|
||||
- influxdb
|
||||
- datadog
|
||||
- stackdriver
|
||||
- cloudwatch
|
||||
- newrelic
|
||||
- graphite
|
||||
- dynatrace
|
||||
- keptn
|
||||
- splunk
|
||||
address:
|
||||
description: API address of this provider
|
||||
type: string
|
||||
headers:
|
||||
description: Headers to add to HTTP(S) requests
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
secretRef:
|
||||
description: Kubernetes secret reference containing the provider credentials
|
||||
type: object
|
||||
@@ -1122,6 +1373,9 @@ spec:
|
||||
region:
|
||||
description: Region of the provider
|
||||
type: string
|
||||
insecureSkipVerify:
|
||||
description: Disable SSL certificate validation for the provider address
|
||||
type: boolean
|
||||
query:
|
||||
description: Query of this metric template
|
||||
type: string
|
||||
@@ -1188,6 +1442,7 @@ spec:
|
||||
- msteams
|
||||
- discord
|
||||
- rocket
|
||||
- gchat
|
||||
channel:
|
||||
description: Alert channel for this provider
|
||||
type: string
|
||||
|
||||
@@ -22,7 +22,7 @@ spec:
      serviceAccountName: flagger
      containers:
        - name: flagger
          image: ghcr.io/fluxcd/flagger:1.11.0
          image: ghcr.io/fluxcd/flagger:1.42.0
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
@@ -1,8 +1,8 @@
apiVersion: v1
name: flagger
version: 1.11.0
appVersion: 1.11.0
kubeVersion: ">=1.16.0-0"
version: 1.42.0
appVersion: 1.42.0
kubeVersion: ">=1.19.0-0"
engine: gotpl
description: Flagger is a progressive delivery operator for Kubernetes
home: https://flagger.app
@@ -18,6 +18,8 @@ keywords:
  - istio
  - appmesh
  - linkerd
  - kuma
  - smi
  - gloo
  - contour
  - nginx
@@ -1,22 +1,18 @@
# Flagger

[Flagger](https://github.com/fluxcd/flagger) is an operator that automates the release process of applications on Kubernetes.
[Flagger](https://github.com/fluxcd/flagger) is a progressive delivery tool that automates the release process
for applications running on Kubernetes. It reduces the risk of introducing a new software version in production
by gradually shifting traffic to the new version while measuring metrics and running conformance tests.

Flagger can run automated application analysis, testing, promotion and rollback for the following deployment strategies:
* Canary Release (progressive traffic shifting)
* A/B Testing (HTTP headers and cookies traffic routing)
* Blue/Green (traffic switching and mirroring)

Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh) and with Kubernetes ingress controllers
(NGINX, Skipper, Gloo, Contour, Traefik).
Flagger can be configured to send alerts to various chat platforms such as Slack, Microsoft Teams, Discord and Rocket.
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
and integrates with various Kubernetes ingress controllers, service mesh and monitoring solutions.

Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
and part of the [Flux](https://fluxcd.io) family of GitOps tools.

## Prerequisites

* Kubernetes >= 1.16
* Kubernetes >= 1.19

## Installing the Chart

@@ -44,10 +40,13 @@ $ helm upgrade -i flagger flagger/flagger \
To install Flagger for **Linkerd** (requires Linkerd Viz extension):

```console
# Note that linkerdAuthPolicy.create=true is only required for Linkerd 2.12 and
# later
$ helm upgrade -i flagger flagger/flagger \
  --namespace=linkerd \
  --namespace=flagger-system \
  --set meshProvider=linkerd \
  --set metricsServer=http://prometheus.linkerd-viz:9090
  --set metricsServer=http://prometheus.linkerd-viz:9090 \
  --set linkerdAuthPolicy.create=true
```

To install Flagger for **AWS App Mesh**:
@@ -59,6 +58,16 @@ $ helm upgrade -i flagger flagger/flagger \
  --set metricsServer=http://appmesh-prometheus:9090
```

To install Flagger for **Kuma Service Mesh** (requires Kuma to have been installed with Prometheus):

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace=kuma-system \
  --set meshProvider=kuma \
  --set metricsServer=http://prometheus-server.kuma-metrics:80
```

To install Flagger and Prometheus for **NGINX** Ingress (requires controller metrics enabled):

```console
@@ -96,6 +105,15 @@ $ helm upgrade -i flagger flagger/flagger \
  --set meshProvider=traefik
```

If you need to add labels to the flagger deployment or pods, you can pass the labels as parameters as shown below.

```console
helm upgrade -i flagger flagger/flagger \
  <other parameters> \
  --set podLabels.<labelName>=<labelValue> \
  --set deploymentLabels.<labelName>=<labelValue>
```

The [configuration](#configuration) section lists the parameters that can be configured during installation.
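
As an alternative to long `--set` chains, the same parameters can be kept in a values file. A minimal sketch, with an illustrative file name and values taken from the configuration table below:

```console
$ cat <<EOF > flagger-values.yaml
meshProvider: istio
metricsServer: http://prometheus.istio-system:9090
podLabels:
  team: platform
EOF

$ helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  -f flagger-values.yaml
```
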
## Uninstalling the Chart
|
||||
@@ -112,53 +130,64 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following table lists the configurable parameters of the Flagger chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | Image repository | `ghcr.io/fluxcd/flagger`
|
||||
`image.tag` | Image tag | `<VERSION>`
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`logLevel` | Log level | `info`
|
||||
`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
|
||||
`prometheus.install` | If `true`, installs Prometheus configured to scrape all pods in the cluster | `false`
|
||||
`prometheus.retention` | Prometheus data retention | `2h`
|
||||
`selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
|
||||
`configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true`
|
||||
`eventWebhook` | If set, Flagger will publish events to the given webhook | None
|
||||
`slack.url` | Slack incoming webhook | None
|
||||
`slack.proxyUrl` | Slack proxy url | None
|
||||
`slack.channel` | Slack channel | None
|
||||
`slack.user` | Slack username | `flagger`
|
||||
`msteams.url` | Microsoft Teams incoming webhook | None
|
||||
`msteams.proxyUrl` | Microsoft Teams proxy url | None
|
||||
`podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
|
||||
`podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace
|
||||
`podMonitor.interval` | Interval at which metrics should be scraped | `15s`
|
||||
`podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}`
|
||||
`leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false`
|
||||
`leaderElection.replicaCount` | Number of replicas | `1`
|
||||
`serviceAccount.create` | If `true`, Flagger will create service account | `true`
|
||||
`serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""`
|
||||
`serviceAccount.annotations` | Annotations for service account | `{}`
|
||||
`ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io`
|
||||
`includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""`
|
||||
`rbac.create` | If `true`, create and use RBAC resources | `true`
|
||||
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
|
||||
`crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false`
|
||||
`resources.requests/cpu` | Pod CPU request | `10m`
|
||||
`resources.requests/memory` | Pod memory request | `32Mi`
|
||||
`resources.limits/cpu` | Pod CPU limit | `1000m`
|
||||
`resources.limits/memory` | Pod memory limit | `512Mi`
|
||||
`affinity` | Node/pod affinities | None
|
||||
`nodeSelector` | Node labels for pod assignment | `{}`
|
||||
`threadiness` | Number of controller workers | `2`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
`istio.kubeconfig.secretName` | The name of the Kubernetes secret containing the Istio shared control plane kubeconfig | None
|
||||
`istio.kubeconfig.key` | The name of Kubernetes secret data key that contains the Istio control plane kubeconfig | `kubeconfig`
|
||||
`ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None
|
||||
`ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None
|
||||
`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
|
||||
`podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false`
|
||||
`podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1`
|
||||
| Parameter | Description | Default |
|
||||
|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------|
|
||||
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger` |
|
||||
| `image.tag` | Image tag | `<VERSION>` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `logLevel` | Log level | `info` |
|
||||
| `metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090` |
|
||||
| `prometheus.install`                  | If `true`, installs Prometheus configured to scrape all pods in the cluster                                                                         | `false`                               |
|
||||
| `prometheus.retention` | Prometheus data retention | `2h` |
|
||||
| `selectorLabels` | List of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name` |
|
||||
| `serviceMonitor.enabled` | If `true`, creates service and serviceMonitor for monitoring Flagger metrics | `false` |
|
||||
| `serviceMonitor.honorLabels` | If `true`, label conflicts are resolved by keeping label values from the scraped data and ignoring the conflicting server-side labels | `false` |
|
||||
| `serviceMonitor.namespace` | Namespace Servicemonitor is installed in | the same namespace |
|
||||
| `serviceMonitor.labels` | labels for the ServiceMonitor passed to Prometheus Operator | `{}` |
|
||||
| `configTracking.enabled` | If `true`, flagger will track changes in Secrets and ConfigMaps referenced in the target deployment | `true` |
|
||||
| `eventWebhook` | If set, Flagger will publish events to the given webhook | None |
|
||||
| `slack.url` | Slack incoming webhook | None |
|
||||
| `slack.proxyUrl` | Slack proxy url | None |
|
||||
| `slack.channel` | Slack channel | None |
|
||||
| `slack.user` | Slack username | `flagger` |
|
||||
| `msteams.url` | Microsoft Teams incoming webhook | None |
|
||||
| `msteams.proxyUrl` | Microsoft Teams proxy url | None |
|
||||
| `clusterName` | When specified, Flagger will add the cluster name to alerts | `""` |
|
||||
| `podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false` |
|
||||
| `podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace |
|
||||
| `podMonitor.interval` | Interval at which metrics should be scraped | `15s` |
|
||||
| `podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}` |
|
||||
| `podMonitor.honorLabels` | If `true`, label conflicts are resolved by keeping label values from the scraped data and ignoring the conflicting server-side labels | `false` |
|
||||
| `leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false` |
|
||||
| `leaderElection.replicaCount` | Number of replicas | `1` |
|
||||
| `serviceAccount.create` | If `true`, Flagger will create service account | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` |
|
||||
| `serviceAccount.annotations` | Annotations for service account | `{}` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` |
|
||||
| `includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""` |
|
||||
| `rbac.create` | If `true`, create and use RBAC resources | `true` |
|
||||
| `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` |
|
||||
| `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` |
|
||||
| `resources.requests/cpu` | Pod CPU request | `10m` |
|
||||
| `resources.requests/memory` | Pod memory request | `32Mi` |
|
||||
| `resources.limits/cpu` | Pod CPU limit | `1000m` |
|
||||
| `resources.limits/memory` | Pod memory limit | `512Mi` |
|
||||
| `affinity` | Node/pod affinities | prefer spread across hosts |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `threadiness` | Number of controller workers | `2` |
|
||||
| `tolerations` | List of node taints to tolerate | `[]` |
|
||||
| `controlplane.kubeconfig.secretName` | The name of the Kubernetes secret containing the service mesh control plane kubeconfig | None |
|
||||
| `controlplane.kubeconfig.key` | The name of Kubernetes secret data key that contains the service mesh control plane kubeconfig | `kubeconfig` |
|
||||
| `ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None |
|
||||
| `ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None |
|
||||
| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
|
||||
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
|
||||
| `podDisruptionBudget.minAvailable`    | The minimal number of available replicas that will be set in the PodDisruptionBudget                                                                | `1`                                   |
|
||||
| `noCrossNamespaceRefs` | If `true`, cross namespace references to custom resources will be disabled | `false` |
|
||||
| `namespace` | When specified, Flagger will restrict itself to watching Canary objects from that namespace | `""` |
|
||||
| `deploymentLabels` | Labels to add to Flagger deployment | `{}` |
|
||||
| `podLabels` | Labels to add to pods of Flagger deployment | `{}` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
|
||||
|
||||
|
||||
@@ -27,6 +27,10 @@ spec:
|
||||
- name: Weight
|
||||
type: string
|
||||
jsonPath: .status.canaryWeight
|
||||
- name: Suspended
|
||||
type: boolean
|
||||
jsonPath: .spec.suspend
|
||||
priority: 1
|
||||
- name: FailedChecks
|
||||
type: string
|
||||
jsonPath: .status.failedChecks
|
||||
@@ -76,7 +80,6 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- targetRef
|
||||
- service
|
||||
- analysis
|
||||
properties:
|
||||
provider:
|
||||
@@ -104,7 +107,7 @@ spec:
|
||||
name:
|
||||
type: string
|
||||
autoscalerRef:
|
||||
description: HPA selector
|
||||
description: Scaler selector
|
||||
type: object
|
||||
required: ["apiVersion", "kind", "name"]
|
||||
properties:
|
||||
@@ -114,8 +117,22 @@ spec:
|
||||
type: string
|
||||
enum:
|
||||
- HorizontalPodAutoscaler
|
||||
- ScaledObject
|
||||
name:
|
||||
type: string
|
||||
primaryScalerQueries:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
primaryScalerReplicas:
|
||||
type: object
|
||||
properties:
|
||||
minReplicas:
|
||||
type: integer
|
||||
minimum: 1
|
||||
maxReplicas:
|
||||
type: integer
|
||||
minimum: 1
|
||||
ingressRef:
|
||||
description: Ingress selector
|
||||
type: object
|
||||
@@ -129,6 +146,19 @@ spec:
|
||||
- Ingress
|
||||
name:
|
||||
type: string
|
||||
routeRef:
|
||||
description: APISIX route selector
|
||||
type: object
|
||||
required: [ "apiVersion", "kind", "name" ]
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- ApisixRoute
|
||||
name:
|
||||
type: string
|
||||
upstreamRef:
|
||||
description: Gloo Upstream selector
|
||||
type: object
|
||||
@@ -158,11 +188,24 @@ spec:
|
||||
portName:
|
||||
description: Container port name
|
||||
type: string
|
||||
appProtocol:
|
||||
description: Application protocol of the port
|
||||
type: string
|
||||
trafficDistribution:
|
||||
description: Traffic distribution of the service
|
||||
type: string
|
||||
enum:
|
||||
- PreferClose
|
||||
- PreferSameZone
|
||||
- PreferSameNode
|
||||
targetPort:
|
||||
description: Container target port name
|
||||
x-kubernetes-int-or-string: true
|
||||
portDiscovery:
|
||||
description: Enable port dicovery
|
||||
description: Enable port discovery
|
||||
type: boolean
|
||||
headless:
|
||||
description: Headless if set to true, generates headless Kubernetes services.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: HTTP or gRPC request timeout
|
||||
@@ -450,6 +493,54 @@ spec:
|
||||
uri:
|
||||
format: string
|
||||
type: string
|
||||
authority:
|
||||
format: string
|
||||
type: string
|
||||
type:
|
||||
format: string
|
||||
type: string
|
||||
mirror:
|
||||
description: Mirror defines a schema for a filter that mirrors requests.
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
backendRef:
|
||||
properties:
|
||||
group:
|
||||
default: ""
|
||||
maxLength: 253
|
||||
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
kind:
|
||||
default: Service
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
|
||||
type: string
|
||||
name:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
namespace:
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
|
||||
type: string
|
||||
port:
|
||||
format: int32
|
||||
maximum: 65535
|
||||
minimum: 1
|
||||
type: integer
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Must have port for Service reference
|
||||
rule: '(size(self.group) == 0 && self.kind == ''Service'')
|
||||
? has(self.port) : true'
|
||||
required:
|
||||
- backendRef
|
||||
headers:
|
||||
description: Headers operations
|
||||
type: object
|
||||
@@ -495,6 +586,45 @@ spec:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
gatewayRefs:
|
||||
description: The list of parent Gateways for a HTTPRoute
|
||||
maxItems: 32
|
||||
type: array
|
||||
items:
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
properties:
|
||||
group:
|
||||
default: gateway.networking.k8s.io
|
||||
maxLength: 253
|
||||
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
kind:
|
||||
default: Gateway
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
|
||||
type: string
|
||||
name:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
namespace:
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
|
||||
type: string
|
||||
sectionName:
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
|
||||
type: string
|
||||
port:
|
||||
format: int32
|
||||
maximum: 65535
|
||||
minimum: 1
|
||||
type: integer
|
||||
corsPolicy:
|
||||
description: Istio Cross-Origin Resource Sharing policy (CORS)
|
||||
type: object
|
||||
@@ -685,6 +815,10 @@ spec:
|
||||
- LEAST_CONN
|
||||
- RANDOM
|
||||
- PASSTHROUGH
|
||||
- LEAST_REQUEST
|
||||
type: string
|
||||
warmupDurationSecs:
|
||||
description: Represents the warmup duration of Service.
|
||||
type: string
|
||||
outlierDetection:
|
||||
description: Settings controlling eviction of unhealthy hosts from the load balancing pool.
|
||||
@@ -782,12 +916,27 @@ spec:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
unmanagedMetadata:
|
||||
description: UnmanagedMetadata is a list of metadata keys that should be ignored by Flagger.
|
||||
type: object
|
||||
properties:
|
||||
annotations:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
labels:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
skipAnalysis:
|
||||
description: Skip analysis and promote canary
|
||||
type: boolean
|
||||
revertOnDeletion:
|
||||
description: Revert mutated resources to original spec on deletion
|
||||
type: boolean
|
||||
suspend:
|
||||
description: Suspend Canary disabling/pausing all canary runs
|
||||
type: boolean
|
||||
analysis:
|
||||
description: Canary analysis for this canary
|
||||
type: object
|
||||
@@ -826,6 +975,12 @@ spec:
|
||||
mirrorWeight:
|
||||
description: Weight of traffic to be mirrored
|
||||
type: number
|
||||
primaryReadyThreshold:
|
||||
description: Percentage of pods that need to be available to consider primary as ready
|
||||
type: number
|
||||
canaryReadyThreshold:
|
||||
description: Percentage of pods that need to be available to consider canary as ready
|
||||
type: number
|
||||
match:
|
||||
description: A/B testing match conditions
|
||||
type: array
|
||||
@@ -855,6 +1010,34 @@ spec:
|
||||
description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax)
|
||||
format: string
|
||||
type: string
|
||||
queryParams:
|
||||
description: Query parameters for matching.
|
||||
type: object
|
||||
additionalProperties:
|
||||
oneOf:
|
||||
- not:
|
||||
anyOf:
|
||||
- required:
|
||||
- exact
|
||||
- required:
|
||||
- prefix
|
||||
- required:
|
||||
- regex
|
||||
- required:
|
||||
- exact
|
||||
- required:
|
||||
- prefix
|
||||
- required:
|
||||
- regex
|
||||
properties:
|
||||
exact:
|
||||
type: string
|
||||
prefix:
|
||||
type: string
|
||||
regex:
|
||||
description: RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax).
|
||||
type: string
|
||||
type: object
|
||||
sourceLabels:
|
||||
description: Applicable only when the 'mesh' gateway is included in the service.gateways list
|
||||
type: object
|
||||
@@ -902,6 +1085,11 @@ spec:
|
||||
namespace:
|
||||
description: Namespace of this metric template
|
||||
type: string
|
||||
templateVariables:
|
||||
description: Additional variables to be used in the metrics query (key-value pairs)
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
alerts:
|
||||
description: Alert list for this canary analysis
|
||||
type: array
|
||||
@@ -967,11 +1155,54 @@ spec:
|
||||
description: Request timeout for this webhook
|
||||
type: string
|
||||
pattern: "^[0-9]+(m|s)"
|
||||
retries:
|
||||
description: Number of retries for this webhook
|
||||
type: number
|
||||
disableTLS:
|
||||
description: Disable TLS verification for this webhook
|
||||
type: boolean
|
||||
metadata:
|
||||
description: Metadata (key-value pairs) for this webhook
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
sessionAffinity:
|
||||
description: SessionAffinity represents the session affinity settings for a canary run.
|
||||
type: object
|
||||
required: [ "cookieName" ]
|
||||
properties:
|
||||
cookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
primaryCookieName:
|
||||
description: CookieName is the key that will be used for the session affinity cookie.
|
||||
type: string
|
||||
domain:
|
||||
description: Domain defines the host to which the cookie will be sent.
|
||||
type: string
|
||||
httpOnly:
|
||||
description: HttpOnly forbids JavaScript from accessing the cookie, for example, through the Document.cookie property.
|
||||
type: boolean
|
||||
maxAge:
|
||||
description: MaxAge indicates the number of seconds until the session affinity cookie will expire.
|
||||
default: 86400
|
||||
type: number
|
||||
partitioned:
|
||||
description: Partitioned indicates that the cookie should be stored using partitioned storage.
|
||||
type: boolean
|
||||
path:
|
||||
description: Path indicates the path that must exist in the requested URL for the browser to send the Cookie header.
|
||||
type: string
|
||||
sameSite:
|
||||
description: SameSite controls whether or not a cookie is sent with cross-site requests.
|
||||
type: string
|
||||
enum:
|
||||
- Strict
|
||||
- Lax
|
||||
- None
|
||||
secure:
|
||||
description: "Secure indicates that the cookie is sent to the server only when a request is made with the https: scheme (except on localhost)"
|
||||
type: boolean
|
||||
status:
|
||||
description: CanaryStatus defines the observed state of a canary.
|
||||
type: object
|
||||
@@ -992,27 +1223,36 @@ spec:
|
||||
- Failed
|
||||
- Terminating
|
||||
- Terminated
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
trackedConfigs:
|
||||
description: TrackedConfig of this canary
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
canaryWeight:
|
||||
description: Traffic weight routed to canary
|
||||
type: number
|
||||
failedChecks:
|
||||
description: Failed check count of the current canary analysis
|
||||
type: number
|
||||
iterations:
|
||||
description: Iteration count of the current canary analysis
|
||||
type: number
|
||||
lastAppliedSpec:
|
||||
description: LastAppliedSpec of this canary
|
||||
type: string
|
||||
lastPromotedSpec:
|
||||
description: LastPromotedSpec of this canary
|
||||
type: string
|
||||
lastTransitionTime:
|
||||
description: LastTransitionTime of this canary
|
||||
format: date-time
|
||||
type: string
|
||||
sessionAffinityCookie:
|
||||
description: Session affinity cookie of the current canary run
|
||||
type: string
|
||||
previousSessionAffinityCookie:
|
||||
description: Session affinity cookie of the previous canary run
|
||||
type: string
|
||||
conditions:
|
||||
description: Status conditions of this canary
|
||||
type: array
|
||||
@@ -1104,12 +1344,23 @@ spec:
|
||||
- prometheus
|
||||
- influxdb
|
||||
- datadog
|
||||
- stackdriver
|
||||
- cloudwatch
|
||||
- newrelic
|
||||
- graphite
|
||||
- dynatrace
|
||||
- keptn
|
||||
- splunk
|
||||
address:
|
||||
description: API address of this provider
|
||||
type: string
|
||||
headers:
|
||||
description: Headers to add to HTTP(S) requests
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
secretRef:
|
||||
description: Kubernetes secret reference containing the provider credentials
|
||||
type: object
|
||||
@@ -1122,6 +1373,9 @@ spec:
|
||||
region:
|
||||
description: Region of the provider
|
||||
type: string
|
||||
insecureSkipVerify:
|
||||
description: Disable SSL certificate validation for the provider address
|
||||
type: boolean
|
||||
query:
|
||||
description: Query of this metric template
|
||||
type: string
|
||||
@@ -1188,6 +1442,7 @@ spec:
|
||||
- msteams
|
||||
- discord
|
||||
- rocket
|
||||
- gchat
|
||||
channel:
|
||||
description: Alert channel for this provider
|
||||
type: string
|
||||
|
||||
@@ -3,8 +3,9 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
annotations:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
|
||||
16
charts/flagger/templates/authorizationpolicy.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
{{- if .Values.linkerdAuthPolicy.create }}
|
||||
apiVersion: policy.linkerd.io/v1alpha1
|
||||
kind: AuthorizationPolicy
|
||||
metadata:
|
||||
namespace: {{ .Values.linkerdAuthPolicy.namespace }}
|
||||
name: prometheus-admin-flagger
|
||||
spec:
|
||||
targetRef:
|
||||
group: policy.linkerd.io
|
||||
kind: Server
|
||||
name: prometheus-admin
|
||||
requiredAuthenticationRefs:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "flagger.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -2,11 +2,22 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "flagger.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- if .Values.deploymentLabels }}
|
||||
{{- range $key, $value := .Values.deploymentLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.leaderElection.replicaCount }}
|
||||
{{- if eq .Values.leaderElection.enabled false }}
|
||||
@@ -22,31 +33,34 @@ spec:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- range $key, $value := .Values.podLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
|
||||
{{- if .Values.affinity }}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
topologyKey: kubernetes.io/hostname
|
||||
{{- tpl (toYaml .Values.affinity) . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
{{- end }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
volumes:
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
- name: kubeconfig
|
||||
secret:
|
||||
secretName: "{{ .Values.istio.kubeconfig.secretName }}"
|
||||
secretName: "{{ .Values.controlplane.kubeconfig.secretName }}"
|
||||
{{- end }}
|
||||
{{- if .Values.additionalVolumes }}
|
||||
{{- toYaml .Values.additionalVolumes | nindent 8 -}}
|
||||
{{- end }}
|
||||
{{- if .Values.podPriorityClassName }}
|
||||
priorityClassName: {{ .Values.podPriorityClassName }}
|
||||
@@ -57,11 +71,11 @@ spec:
|
||||
securityContext:
|
||||
{{ toYaml .Values.securityContext.context | indent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
volumeMounts:
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
- name: kubeconfig
|
||||
mountPath: "/tmp/istio-host"
|
||||
{{- end }}
|
||||
mountPath: "/tmp/controlplane"
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
@@ -127,12 +141,18 @@ spec:
|
||||
{{- if .Values.kubeconfigBurst }}
|
||||
- -kubeconfig-burst={{ .Values.kubeconfigBurst }}
|
||||
{{- end }}
|
||||
{{- if .Values.istio.kubeconfig.secretName }}
|
||||
- -kubeconfig-service-mesh=/tmp/istio-host/{{ .Values.istio.kubeconfig.key }}
|
||||
{{- if .Values.controlplane.kubeconfig.secretName }}
|
||||
- -kubeconfig-service-mesh=/tmp/controlplane/{{ .Values.controlplane.kubeconfig.key }}
|
||||
{{- end }}
|
||||
{{- if .Values.threadiness }}
|
||||
- -threadiness={{ .Values.threadiness }}
|
||||
{{- end }}
|
||||
{{- if .Values.clusterName }}
|
||||
- -cluster-name={{ .Values.clusterName }}
|
||||
{{- end }}
|
||||
{{- if .Values.noCrossNamespaceRefs }}
|
||||
- -no-cross-namespace-refs={{ .Values.noCrossNamespaceRefs }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
{{- if .Values.podDisruptionBudget.enabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
|
||||
apiVersion: policy/v1
|
||||
{{- else }}
|
||||
apiVersion: policy/v1beta1
|
||||
{{- end }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ template "flagger.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
|
||||
selector:
|
||||
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
- interval: {{ .Values.podMonitor.interval }}
|
||||
path: /metrics
|
||||
port: http
|
||||
honorLabels: {{ .Values.podMonitor.honorLabels }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
|
||||
@@ -50,6 +50,7 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ template "flagger.fullname" . }}-psp
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
helm.sh/chart: {{ template "flagger.chart" . }}
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
|
||||
@@ -27,6 +27,18 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
@@ -74,6 +86,7 @@ rules:
|
||||
resources:
|
||||
- canaries
|
||||
- canaries/status
|
||||
- canaries/finalizers
|
||||
- metrictemplates
|
||||
- metrictemplates/status
|
||||
- alertproviders
|
||||
@@ -184,7 +197,7 @@ rules:
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- traefik.containo.us
|
||||
- traefik.io
|
||||
resources:
|
||||
- traefikservices
|
||||
verbs:
|
||||
@@ -195,10 +208,87 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- kuma.io
|
||||
resources:
|
||||
- trafficroutes
|
||||
- trafficroutes/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
- httproutes
|
||||
- httproutes/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- keda.sh
|
||||
resources:
|
||||
- scaledobjects
|
||||
- scaledobjects/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apisix.apache.org
|
||||
resources:
|
||||
- apisixroutes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- metrics.keptn.sh
|
||||
resources:
|
||||
- keptnmetrics
|
||||
- analyses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- nonResourceURLs:
|
||||
- /version
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- serving.knative.dev
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- serving.knative.dev
|
||||
resources:
|
||||
- revisions
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
|
||||
19
charts/flagger/templates/service.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
{{- if .Values.serviceMonitor.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "flagger.name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 8080
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
29
charts/flagger/templates/servicemonitor.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
{{- if .Values.serviceMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "flagger.name" . }}
|
||||
{{- if .Values.serviceMonitor.namespace }}
|
||||
namespace: {{ .Values.serviceMonitor.namespace }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
port: http
|
||||
interval: 30s
|
||||
scrapeTimeout: 30s
|
||||
honorLabels: {{ .Values.serviceMonitor.honorLabels }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "flagger.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
@@ -1,8 +1,11 @@
|
||||
# Default values for flagger.
|
||||
|
||||
## Deployment annotations
|
||||
# annotations: {}
|
||||
|
||||
image:
|
||||
repository: ghcr.io/fluxcd/flagger
|
||||
tag: 1.11.0
|
||||
tag: 1.42.0
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret:
|
||||
|
||||
@@ -13,13 +16,23 @@ podAnnotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
|
||||
linkerd.io/inject: enabled
|
||||
|
||||
# priority class name for pod priority configuration
|
||||
podPriorityClassName: ""
|
||||
|
||||
metricsServer: "http://prometheus:9090"
|
||||
|
||||
# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik
|
||||
# creates serviceMonitor for monitoring Flagger metrics
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
honorLabels: false
|
||||
# Set the namespace the ServiceMonitor should be deployed
|
||||
# namespace: monitoring
|
||||
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
|
||||
# labels:
|
||||
|
||||
# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik, apisix
|
||||
meshProvider: ""
|
||||
|
||||
# single namespace restriction
|
||||
@@ -50,6 +63,9 @@ securityContext:
|
||||
# when specified, flagger will publish events to the provided webhook
|
||||
eventWebhook: ""
|
||||
|
||||
# when specified, flagger will add the cluster name to alerts
|
||||
clusterName: ""
|
||||
|
||||
slack:
|
||||
user: flagger
|
||||
channel:
|
||||
@@ -66,6 +82,7 @@ podMonitor:
|
||||
namespace:
|
||||
interval: 15s
|
||||
additionalLabels: {}
|
||||
honorLabels: false
|
||||
|
||||
#env:
|
||||
#- name: SLACK_URL
|
||||
@@ -117,6 +134,13 @@ crd:
|
||||
# crd.create: `true` if custom resource definitions should be created
|
||||
create: false
|
||||
|
||||
linkerdAuthPolicy:
|
||||
# linkerdAuthPolicy.create: Whether to create an AuthorizationPolicy in
|
||||
# linkerd viz' namespace to allow flagger to reach viz' prometheus service
|
||||
create: false
|
||||
# linkerdAuthPolicy.namespace: linkerd-viz' namespace
|
||||
namespace: linkerd-viz
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
@@ -132,10 +156,21 @@ nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: '{{ template "flagger.name" . }}'
|
||||
app.kubernetes.io/instance: '{{ .Release.Name }}'
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
prometheus:
|
||||
# to be used with ingress controllers
|
||||
install: false
|
||||
image: docker.io/prom/prometheus:v2.23.0
|
||||
image: docker.io/prom/prometheus:v2.41.0
|
||||
pullSecret:
|
||||
retention: 2h
|
||||
# when enabled, it will add a security context for the prometheus pod
|
||||
@@ -148,15 +183,27 @@ prometheus:
|
||||
kubeconfigQPS: ""
|
||||
kubeconfigBurst: ""
|
||||
|
||||
# Istio multi-cluster service mesh (shared control plane single-network)
|
||||
# https://istio.io/docs/setup/install/multicluster/shared-vpn/
|
||||
istio:
|
||||
# Multi-cluster service mesh (shared control plane single-network)
|
||||
controlplane:
|
||||
kubeconfig:
|
||||
# istio.kubeconfig.secretName: The name of the secret containing the Istio control plane kubeconfig
|
||||
# controlplane.kubeconfig.secretName: The name of the secret containing the mesh control plane kubeconfig
|
||||
secretName: ""
|
||||
# istio.kubeconfig.key: The name of secret data key that contains the Istio control plane kubeconfig
|
||||
# controlplane.kubeconfig.key: The name of secret data key that contains the mesh control plane kubeconfig
|
||||
key: "kubeconfig"
|
||||
|
||||
podDisruptionBudget:
|
||||
enabled: false
|
||||
minAvailable: 1
|
||||
|
||||
# Additional labels to be added to pods
|
||||
podLabels: {}
|
||||
|
||||
# Additional labels to be added to deployments
|
||||
deploymentLabels: { }
|
||||
|
||||
noCrossNamespaceRefs: false
|
||||
|
||||
# Placeholder to supply additional volumes to the flagger pod
|
||||
additionalVolumes: {}
|
||||
# - name: tmpfs
|
||||
# emptyDir: {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v1
|
||||
name: grafana
|
||||
version: 1.5.0
|
||||
version: 1.7.0
|
||||
appVersion: 7.2.0
|
||||
description: Grafana dashboards for monitoring Flagger canary deployments
|
||||
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/flagger-icon.png
|
||||
|
||||
@@ -1146,7 +1146,6 @@
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": null,
|
||||
"datasource": "prometheus",
|
||||
"definition": "query_result(sum(envoy_cluster_upstream_rq) by (kubernetes_namespace))",
|
||||
"hide": 0,
|
||||
@@ -1168,7 +1167,6 @@
|
||||
},
|
||||
{
|
||||
"allValue": null,
|
||||
"current": null,
|
||||
"datasource": "prometheus",
|
||||
"definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app=~\".*-primary\"}) by (app))",
|
||||
"hide": 0,
|
||||
@@ -1190,7 +1188,6 @@
|
||||
},
|
||||
{
|
||||
"allValue": null,
|
||||
"current": null,
|
||||
"datasource": "prometheus",
|
||||
"definition": "query_result(sum(envoy_cluster_upstream_rq{kubernetes_namespace=\"$namespace\",app!~\".*-primary\"}) by (app))",
|
||||
"hide": 0,
|
||||
|
||||
@@ -403,7 +403,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -411,7 +411,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -419,7 +419,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -509,7 +509,7 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -517,7 +517,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -525,7 +525,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
|
||||
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le)) / 1000",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v1
|
||||
name: loadtester
|
||||
version: 0.19.0
|
||||
appVersion: 0.18.0
|
||||
kubeVersion: ">=1.11.0-0"
|
||||
version: 0.36.0
|
||||
appVersion: 0.36.0
|
||||
kubeVersion: ">=1.19.0-0"
|
||||
engine: gotpl
|
||||
description: Flagger's load testing service based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
|
||||
home: https://docs.flagger.app
|
||||
@@ -19,5 +19,6 @@ keywords:
|
||||
- appmesh
|
||||
- linkerd
|
||||
- gloo
|
||||
- smi
|
||||
- gitops
|
||||
- load testing
|
||||
|
||||
@@ -7,7 +7,7 @@ It can be used to generate HTTP and gRPC traffic during canary analysis when con
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* Kubernetes >= 1.11
|
||||
* Kubernetes >= 1.19
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
@@ -26,7 +26,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester
|
||||
The command deploys loadtester on the Kubernetes cluster in the default namespace.
|
||||
|
||||
> **Tip**: Note that the namespace where you deploy the load tester should
|
||||
> have the Istio, App Mesh or Linkerd sidecar injection enabled
|
||||
> have the Istio, App Mesh, Linkerd or Open Service Mesh sidecar injection enabled
|
||||
|
||||
The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
@@ -44,34 +44,37 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following table lists the configurable parameters of the load tester chart and their default values.
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
|
||||
`image.pullPolicy` | Image pull policy | `IfNotPresent`
|
||||
`image.tag` | Image tag | `<VERSION>`
|
||||
`replicaCount` | Desired number of pods | `1`
|
||||
`serviceAccountName` | Kubernetes service account name | `none`
|
||||
`resources.requests.cpu` | CPU requests | `10m`
|
||||
`resources.requests.memory` | Memory requests | `64Mi`
|
||||
`tolerations` | List of node taints to tolerate | `[]`
|
||||
`affinity` | node/pod affinities | `node`
|
||||
`nodeSelector` | Node labels for pod assignment | `{}`
|
||||
`service.type` | Type of service | `ClusterIP`
|
||||
`service.port` | ClusterIP port | `80`
|
||||
`cmd.timeout` | Command execution timeout | `1h`
|
||||
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
|
||||
`appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false`
|
||||
`appmesh.backends` | AWS App Mesh virtual services | `none`
|
||||
`istio.enabled` | Create Istio virtual service | `false`
|
||||
`istio.host` | Loadtester hostname | `flagger-loadtester.flagger`
|
||||
`istio.gateway.enabled` | Create Istio gateway in namespace | `false`
|
||||
`istio.tls.enabled` | Enable TLS in gateway ( TLS secrets should be in namespace ) | `false`
|
||||
`istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false`
|
||||
`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
|
||||
`securityContext.enabled` | Add securityContext to container | ""
|
||||
`securityContext.context` | securityContext to add | ""
|
||||
`podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false`
|
||||
`podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1`
|
||||
| Parameter | Description | Default |
|
||||
|------------------------------------|--------------------------------------------------------------------------------------|-------------------------------------|
|
||||
| `image.repository` | Image repository | `ghcr.io/fluxcd/flagger-loadtester` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.tag` | Image tag | `<VERSION>` |
|
||||
| `replicaCount` | Desired number of pods | `1` |
|
||||
| `serviceAccountName` | Kubernetes service account name | `none` |
|
||||
| `resources.requests.cpu` | CPU requests | `10m` |
|
||||
| `resources.requests.memory` | Memory requests | `64Mi` |
|
||||
| `tolerations` | List of node taints to tolerate | `[]` |
|
||||
| `affinity` | node/pod affinities | `node` |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `service.type` | Type of service | `ClusterIP` |
|
||||
| `service.port` | ClusterIP port | `80` |
|
||||
| `cmd.timeout` | Command execution timeout | `1h` |
|
||||
| `cmd.namespaceRegexp` | Restrict access to canaries in matching namespaces | "" |
|
||||
| `logLevel` | Log level can be debug, info, warning, error or panic | `info` |
|
||||
| `appmesh.enabled` | Create AWS App Mesh v1beta2 virtual node | `false` |
|
||||
| `appmesh.backends` | AWS App Mesh virtual services | `none` |
|
||||
| `istio.enabled` | Create Istio virtual service | `false` |
|
||||
| `istio.host` | Loadtester hostname | `flagger-loadtester.flagger` |
|
||||
| `istio.gateway.enabled` | Create Istio gateway in namespace | `false` |
|
||||
| `istio.tls.enabled` | Enable TLS in gateway ( TLS secrets should be in namespace ) | `false` |
|
||||
| `istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false` |
|
||||
| `podPriorityClassName` | PriorityClass name for pod priority configuration | "" |
|
||||
| `securityContext.enabled` | Add securityContext to container | `false` |
|
||||
| `securityContext.context`          | securityContext to add                                                                 | ""                                  |
|
||||
| `podSecurityContext.enabled` | Add securityContext to pod | `false` |
|
||||
| `podSecurityContext.context` | securityContext to add | "" |
|
||||
| `podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false` |
|
||||
| `podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
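for instance, a sketch that overrides a few of the parameters from the table above (the `test` namespace and the values shown are placeholders, not recommendations):

```bash
# Hypothetical override of a few chart parameters via --set;
# adjust the namespace and values to your environment.
helm upgrade -i flagger-loadtester flagger/loadtester \
  --namespace test \
  --set cmd.timeout=1h \
  --set cmd.namespaceRegexp='^test-.*' \
  --set logLevel=info
```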
|
||||
|
||||
|
||||
@@ -17,10 +17,14 @@ spec:
|
||||
labels:
|
||||
app: {{ include "loadtester.name" . }}
|
||||
app.kubernetes.io/name: {{ include "loadtester.name" . }}
|
||||
{{- range $key, $value := .Values.podLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
appmesh.k8s.aws/ports: "444"
|
||||
openservicemesh.io/inbound-port-exclusion-list: "80, 8080"
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- toYaml .Values.podAnnotations | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.serviceAccountName }}
|
||||
@@ -35,7 +39,7 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
{{- if .Values.securityContext.enabled }}
|
||||
securityContext:
|
||||
{{ toYaml .Values.securityContext.context | indent 12 }}
|
||||
{{- toYaml .Values.securityContext.context | nindent 12 }}
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
@@ -47,6 +51,7 @@ spec:
|
||||
- -port=8080
|
||||
- -log-level={{ .Values.logLevel }}
|
||||
- -timeout={{ .Values.cmd.timeout }}
|
||||
- -namespace-regexp={{ .Values.cmd.namespaceRegexp }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
@@ -73,6 +78,18 @@ spec:
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.volumeMounts }}
|
||||
volumeMounts:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecret }}
|
||||
imagePullSecrets:
|
||||
- name: {{ .Values.image.pullSecret }}
|
||||
{{- end }}
|
||||
{{ with .Values.volumes }}
|
||||
volumes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
@@ -85,3 +102,7 @@ spec:
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext.enabled }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext.context | nindent 12 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{{- if and (.Values.istio.enabled) (.Values.istio.gateway.enabled) }}
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{{- if .Values.istio.enabled }}
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
{{- if .Values.podDisruptionBudget.enabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
|
||||
apiVersion: policy/v1
|
||||
{{- else }}
|
||||
apiVersion: policy/v1beta1
|
||||
{{- end }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ include "loadtester.fullname" . }}
|
||||
|
||||
@@ -51,4 +51,7 @@ metadata:
|
||||
app.kubernetes.io/name: {{ template "loadtester.name" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Values.rbac.serviceAccountAnnotations }}
|
||||
annotations: {{ tpl (toYaml .Values.rbac.serviceAccountAnnotations) . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -2,8 +2,11 @@ replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: ghcr.io/fluxcd/flagger-loadtester
|
||||
tag: 0.18.0
|
||||
tag: 0.36.0
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret:
|
||||
|
||||
podLabels: {}
|
||||
|
||||
podAnnotations:
|
||||
prometheus.io/scrape: "true"
|
||||
@@ -14,6 +17,7 @@ podPriorityClassName: ""
|
||||
logLevel: info
|
||||
cmd:
|
||||
timeout: 1h
|
||||
namespaceRegexp: ""
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
@@ -29,6 +33,9 @@ resources:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
|
||||
volumes: []
|
||||
volumeMounts: []
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
@@ -47,6 +54,8 @@ rbac:
|
||||
# resources: ["pods"]
|
||||
# verbs: ["list", "get"]
|
||||
rules: []
|
||||
# annotations to add to the service account
|
||||
serviceAccountAnnotations: {}
|
||||
|
||||
# name of an existing service account to use - if not creating rbac resources
|
||||
serviceAccountName: ""
|
||||
@@ -82,6 +91,12 @@ securityContext:
|
||||
runAsUser: 100
|
||||
runAsGroup: 101
|
||||
|
||||
podSecurityContext:
|
||||
enabled: false
|
||||
context:
|
||||
fsGroup: 101
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
|
||||
podDisruptionBudget:
|
||||
enabled: false
|
||||
minAvailable: 1
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v1
|
||||
version: 5.0.0
|
||||
appVersion: 5.0.0
|
||||
version: 6.1.4
|
||||
appVersion: 6.1.3
|
||||
name: podinfo
|
||||
engine: gotpl
|
||||
description: Flagger canary deployment demo application
|
||||
|
||||
@@ -20,7 +20,7 @@ helm upgrade -i frontend flagger/podinfo \
|
||||
--set backend=http://backend.test:9898/echo \
|
||||
--set canary.enabled=true \
|
||||
--set canary.istioIngress.enabled=true \
|
||||
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
|
||||
--set canary.istioIngress.gateway=istio-system/public-gateway \
|
||||
--set canary.istioIngress.host=frontend.istio.example.com
|
||||
```
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ spec:
|
||||
kind: Deployment
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
service:
|
||||
@@ -57,4 +57,4 @@ spec:
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 2 http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{{- if .Values.hpa.enabled -}}
|
||||
apiVersion: autoscaling/v2beta1
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "podinfo.fullname" . }}
|
||||
@@ -20,12 +20,16 @@ spec:
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: {{ .Values.hpa.cpu }}
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.hpa.cpu }}
|
||||
{{- end }}
|
||||
{{- if .Values.hpa.memory }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageValue: {{ .Values.hpa.memory }}
|
||||
target:
|
||||
type: AverageValue
|
||||
averageValue: {{ .Values.hpa.memory }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if not .Values.canary.enabled }}
|
||||
{{- if and .Values.service.enabled (not .Values.canary.enabled) }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
|
||||
@@ -12,6 +12,7 @@ metadata:
|
||||
sidecar.istio.io/inject: "false"
|
||||
linkerd.io/inject: disabled
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
|
||||
openservicemesh.io/sidecar-injection: disabled
|
||||
spec:
|
||||
containers:
|
||||
- name: tools
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Default values for podinfo.
|
||||
image:
|
||||
repository: ghcr.io/stefanprodan/podinfo
|
||||
tag: 5.0.0
|
||||
tag: 6.1.3
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
podAnnotations: {}
|
||||
@@ -25,7 +25,7 @@ canary:
|
||||
istioIngress:
|
||||
enabled: false
|
||||
# Istio ingress gateway name
|
||||
gateway: public-gateway.istio-system.svc.cluster.local
|
||||
gateway: istio-system/public-gateway
|
||||
# external host name eg. podinfo.example.com
|
||||
host:
|
||||
analysis:
|
||||
|
||||
@@ -51,6 +51,8 @@ import (
|
||||
"github.com/fluxcd/flagger/pkg/server"
|
||||
"github.com/fluxcd/flagger/pkg/signals"
|
||||
"github.com/fluxcd/flagger/pkg/version"
|
||||
|
||||
knative "knative.dev/serving/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -66,6 +68,7 @@ var (
|
||||
msteamsProxyURL string
|
||||
includeLabelPrefix string
|
||||
slackURL string
|
||||
slackToken string
|
||||
slackProxyURL string
|
||||
slackUser string
|
||||
slackChannel string
|
||||
@@ -83,6 +86,8 @@ var (
|
||||
enableConfigTracking bool
|
||||
ver bool
|
||||
kubeconfigServiceMesh string
|
||||
clusterName string
|
||||
noCrossNamespaceRefs bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -95,6 +100,7 @@ func init() {
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "8080", "Port to listen on.")
|
||||
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
|
||||
flag.StringVar(&slackToken, "slack-token", "", "Slack bot token.")
|
||||
flag.StringVar(&slackProxyURL, "slack-proxy-url", "", "Slack proxy URL.")
|
||||
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
|
||||
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
|
||||
@@ -106,7 +112,7 @@ func init() {
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper or traefik.")
|
||||
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, knative, gloo, nginx, skipper, traefik, apisix, osm or kuma.")
|
||||
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
|
||||
flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for NGINX ingresses.")
|
||||
flag.StringVar(&ingressClass, "ingress-class", "", "Ingress class used for annotating HTTPProxy objects.")
|
||||
@@ -115,6 +121,8 @@ func init() {
|
||||
flag.BoolVar(&enableConfigTracking, "enable-config-tracking", true, "Enable secrets and configmaps tracking.")
|
||||
flag.BoolVar(&ver, "version", false, "Print version")
|
||||
flag.StringVar(&kubeconfigServiceMesh, "kubeconfig-service-mesh", "", "Path to a kubeconfig for the service mesh control plane cluster.")
|
||||
flag.StringVar(&clusterName, "cluster-name", "", "Cluster name to be included in alert msgs.")
|
||||
flag.BoolVar(&noCrossNamespaceRefs, "no-cross-namespace-refs", false, "When set to true, Flagger can only refer to resources in the same namespace.")
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -160,19 +168,24 @@ func main() {
|
||||
logger.Fatalf("Error building flagger clientset: %s", err.Error())
|
||||
}
|
||||
|
||||
knativeClient, err := knative.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building knative clientset: %s", err.Error())
|
||||
}
|
||||
|
||||
// use a remote cluster for routing if a service mesh kubeconfig is specified
|
||||
if kubeconfigServiceMesh == "" {
|
||||
kubeconfigServiceMesh = kubeconfig
|
||||
}
|
||||
cfgHost, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigServiceMesh)
|
||||
serviceMeshCfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigServiceMesh)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building host kubeconfig: %v", err)
|
||||
}
|
||||
|
||||
cfgHost.QPS = float32(kubeconfigQPS)
|
||||
cfgHost.Burst = kubeconfigBurst
|
||||
serviceMeshCfg.QPS = float32(kubeconfigQPS)
|
||||
serviceMeshCfg.Burst = kubeconfigBurst
|
||||
|
||||
meshClient, err := clientset.NewForConfig(cfgHost)
|
||||
meshClient, err := clientset.NewForConfig(serviceMeshCfg)
|
||||
if err != nil {
|
||||
logger.Fatalf("Error building mesh clientset: %v", err)
|
||||
}
|
||||
@@ -208,7 +221,14 @@ func main() {
|
||||
// start HTTP server
|
||||
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
|
||||
|
||||
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, ingressClass, logger, meshClient)
|
||||
setOwnerRefs := true
|
||||
// Routers shouldn't set OwnerRefs on resources that they create since the
|
||||
// service mesh/ingress controller is in a different cluster.
|
||||
if cfg.Host != serviceMeshCfg.Host {
|
||||
setOwnerRefs = false
|
||||
}
|
||||
|
||||
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, knativeClient, ingressAnnotationsPrefix, ingressClass, logger, meshClient, setOwnerRefs)
|
||||
|
||||
var configTracker canary.Tracker
|
||||
if enableConfigTracking {
|
||||
@@ -223,10 +243,11 @@ func main() {
|
||||
|
||||
includeLabelPrefixArray := strings.Split(includeLabelPrefix, ",")
|
||||
|
||||
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, includeLabelPrefixArray, logger)
|
||||
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, knativeClient, configTracker, labels, includeLabelPrefixArray, logger)
|
||||
|
||||
c := controller.NewController(
|
||||
kubeClient,
|
||||
knativeClient,
|
||||
flaggerClient,
|
||||
infos,
|
||||
controlLoopInterval,
|
||||
@@ -238,6 +259,9 @@ func main() {
|
||||
meshProvider,
|
||||
version.VERSION,
|
||||
fromEnv("EVENT_WEBHOOK_URL", eventWebhook),
|
||||
clusterName,
|
||||
noCrossNamespaceRefs,
|
||||
cfg,
|
||||
)
|
||||
|
||||
// leader election context
|
||||
@@ -312,7 +336,7 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient
|
||||
id = id + "_" + string(uuid.NewUUID())
|
||||
|
||||
lock, err := resourcelock.New(
|
||||
resourcelock.ConfigMapsResourceLock,
|
||||
resourcelock.LeasesResourceLock,
|
||||
ns,
|
||||
configMapName,
|
||||
kubeClient.CoreV1(),
|
||||
@@ -352,6 +376,7 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient
|
||||
|
||||
func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
provider := "slack"
|
||||
token := fromEnv("SLACK_TOKEN", slackToken)
|
||||
notifierURL := fromEnv("SLACK_URL", slackURL)
|
||||
notifierProxyURL := fromEnv("SLACK_PROXY_URL", slackProxyURL)
|
||||
if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
|
||||
@@ -359,7 +384,7 @@ func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
|
||||
notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
|
||||
notifierProxyURL = fromEnv("MSTEAMS_PROXY_URL", msteamsProxyURL)
|
||||
}
|
||||
notifierFactory := notifier.NewFactory(notifierURL, notifierProxyURL, slackUser, slackChannel)
|
||||
notifierFactory := notifier.NewFactory(notifierURL, token, notifierProxyURL, slackUser, slackChannel)
|
||||
|
||||
var err error
|
||||
client, err = notifierFactory.Notifier(provider)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2020 The Flux authors
|
||||
Copyright 2020, 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -19,19 +19,22 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/fluxcd/flagger/pkg/loadtester"
|
||||
"github.com/fluxcd/flagger/pkg/logger"
|
||||
"github.com/fluxcd/flagger/pkg/signals"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var VERSION = "0.18.0"
|
||||
var VERSION = "0.36.0"
|
||||
var (
|
||||
logLevel string
|
||||
port string
|
||||
timeout time.Duration
|
||||
namespaceRegexp string
|
||||
zapReplaceGlobals bool
|
||||
zapEncoding string
|
||||
)
|
||||
@@ -40,6 +43,7 @@ func init() {
|
||||
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
|
||||
flag.StringVar(&port, "port", "9090", "Port to listen on.")
|
||||
flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
|
||||
flag.StringVar(&namespaceRegexp, "namespace-regexp", "", "Restrict access to canaries in matching namespaces.")
|
||||
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
|
||||
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
|
||||
}
|
||||
@@ -66,5 +70,12 @@ func main() {
|
||||
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
|
||||
|
||||
gateStorage := loadtester.NewGateStorage("in-memory")
|
||||
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
|
||||
|
||||
var namespaceRegexpCompiled *regexp.Regexp
|
||||
if namespaceRegexp != "" {
|
||||
namespaceRegexpCompiled = regexp.MustCompile(namespaceRegexp)
|
||||
}
|
||||
authorizer := loadtester.NewAuthorizer(namespaceRegexpCompiled)
|
||||
|
||||
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, authorizer, stopCh)
|
||||
}
|
||||
|
||||
BIN
docs/diagrams/flagger-apisix-overview.png
Normal file
|
After Width: | Height: | Size: 7.4 MiB |
BIN
docs/diagrams/flagger-gatewayapi-canary.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
docs/diagrams/flagger-kuma-canary.png
Normal file
|
After Width: | Height: | Size: 121 KiB |
|
Before Width: | Height: | Size: 41 KiB |
BIN
docs/diagrams/flagger-osm-traffic-split.png
Normal file
|
After Width: | Height: | Size: 124 KiB |
|
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 56 KiB |
@@ -10,32 +10,32 @@ version in production by gradually shifting traffic to the new version while mea
|
||||
and running conformance tests.
|
||||
|
||||
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
|
||||
using a service mesh (App Mesh, Istio, Linkerd)
|
||||
or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
|
||||
For release analysis, Flagger can query Prometheus, Datadog, New Relic, CloudWatch or Graphite
|
||||
and for alerting it uses Slack, MS Teams, Discord and Rocket.
|
||||
using a service mesh or an ingress controller for traffic routing.
|
||||
For release analysis, Flagger can query Prometheus, InfluxDB, Datadog, New Relic, CloudWatch, Stackdriver
|
||||
or Graphite and for alerting it uses Slack, MS Teams, Discord and Rocket.
|
||||
|
||||

|
||||
|
||||
Flagger can be configured with Kubernetes custom resources and is compatible with
|
||||
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
|
||||
it can be used in **GitOps** pipelines together with tools like Flux, JenkinsX, Carvel, Argo, etc.
|
||||
it can be used in **GitOps** pipelines together with tools like [Flux CD](install/flagger-install-with-flux.md).
|
||||
|
||||
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project
|
||||
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) graduated project
|
||||
and part of [Flux](https://fluxcd.io) family of GitOps tools.
|
||||
|
||||
## Getting started
|
||||
|
||||
To get started with Flagger, choose one of the supported routing providers and
|
||||
[install](install/flagger-install-on-kubernetes.md) Flagger with Helm or Kustomize.
|
||||
[install](install/flagger-install-with-flux.md) Flagger with Flux CD.
|
||||
|
||||
After installing Flagger, you can follow one of these tutorials to get started:
|
||||
|
||||
**Service mesh tutorials**
|
||||
|
||||
* [Gateway API](tutorials/gatewayapi-progressive-delivery.md)
|
||||
* [Istio](tutorials/istio-progressive-delivery.md)
|
||||
* [Linkerd](tutorials/linkerd-progressive-delivery.md)
|
||||
* [AWS App Mesh](tutorials/appmesh-progressive-delivery.md)
|
||||
* [Kuma](tutorials/kuma-progressive-delivery.md)
|
||||
|
||||
**Ingress controller tutorials**
|
||||
|
||||
@@ -44,9 +44,7 @@ After installing Flagger, you can follow one of these tutorials to get started:
|
||||
* [NGINX Ingress](tutorials/nginx-progressive-delivery.md)
|
||||
* [Skipper Ingress](tutorials/skipper-progressive-delivery.md)
|
||||
* [Traefik](tutorials/traefik-progressive-delivery.md)
|
||||
* [Apache APISIX](tutorials/apisix-progressive-delivery.md)
|
||||
|
||||
**Hands-on GitOps workshops**
|
||||
|
||||
* [Istio](https://github.com/stefanprodan/gitops-istio)
|
||||
* [Linkerd](https://helm.workshop.flagger.dev)
|
||||
* [AWS App Mesh](https://eks.handson.flagger.dev)
|
||||
The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation,
|
||||
please see our [Trademark Usage page](https://www.linuxfoundation.org/legal/trademark-usage).
|
||||
|
||||
@@ -6,9 +6,7 @@
|
||||
## Install
|
||||
|
||||
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
|
||||
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
|
||||
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
|
||||
* [Flagger Install on Alibaba ServiceMesh](install/flagger-install-on-alibaba-servicemesh.md)
|
||||
* [Flagger Install with Flux](install/flagger-install-with-flux.md)
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -21,17 +19,21 @@
|
||||
|
||||
## Tutorials
|
||||
|
||||
* [Gateway API Canary Deployments](tutorials/gatewayapi-progressive-delivery.md)
|
||||
* [Istio Canary Deployments](tutorials/istio-progressive-delivery.md)
|
||||
* [Istio A/B Testing](tutorials/istio-ab-testing.md)
|
||||
* [Linkerd Canary Deployments](tutorials/linkerd-progressive-delivery.md)
|
||||
* [App Mesh Canary Deployments](tutorials/appmesh-progressive-delivery.md)
|
||||
* [Kuma Canary Deployments](tutorials/kuma-progressive-delivery.md)
|
||||
* [Knative Canary Deployments](tutorials/knative-progressive-delivery.md)
|
||||
* [Contour Canary Deployments](tutorials/contour-progressive-delivery.md)
|
||||
* [Gloo Canary Deployments](tutorials/gloo-progressive-delivery.md)
|
||||
* [NGINX Canary Deployments](tutorials/nginx-progressive-delivery.md)
|
||||
* [Skipper Canary Deployments](tutorials/skipper-progressive-delivery.md)
|
||||
* [Traefik Canary Deployments](tutorials/traefik-progressive-delivery.md)
|
||||
* [Apache APISIX Canary Deployments](tutorials/apisix-progressive-delivery.md)
|
||||
* [Blue/Green Deployments](tutorials/kubernetes-blue-green.md)
|
||||
* [Canary analysis with Prometheus Operator](tutorials/prometheus-operator.md)
|
||||
* [Canary analysis with KEDA ScaledObjects](tutorials/keda-scaledobject.md)
|
||||
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)
|
||||
|
||||
## Dev
|
||||
|
||||
@@ -8,17 +8,13 @@ Flagger is written in Go and uses Go modules for dependency management.
|
||||
|
||||
On your dev machine install the following tools:
|
||||
|
||||
* go >= 1.14
|
||||
* git >= 2.20
|
||||
* bash >= 5.0
|
||||
* make >= 3.81
|
||||
* kubectl >= 1.16
|
||||
* kustomize >= 3.5
|
||||
* helm >= 3.0
|
||||
* docker >= 19.03
|
||||
* go >= 1.25
|
||||
* kubectl >= 1.30
|
||||
* kustomize >= 5.0
|
||||
* helm >= 3.0
|
||||
|
||||
You'll also need a Kubernetes cluster for testing Flagger.
|
||||
You can use Minikube, Kind, Docker desktop or any remote cluster (AKS/EKS/GKE/etc) Kubernetes version 1.16 or newer.
|
||||
You can use Minikube, Kind, Docker desktop or any remote cluster (AKS/EKS/GKE/etc).
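For example, a throwaway local cluster created with Kind is enough for most of the workflow below (a sketch; any of the options above works just as well):

```bash
# One possible local setup: a disposable Kind cluster for running
# the Flagger e2e tests and loading locally built images.
kind create cluster --name flagger-dev
kubectl cluster-info --context kind-flagger-dev
```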
|
||||
|
||||
To start contributing to Flagger, fork the [repository](https://github.com/fluxcd/flagger) on GitHub.
|
||||
|
||||
@@ -100,6 +96,8 @@ make codegen
|
||||
Run code formatters:
|
||||
|
||||
```bash
|
||||
go install golang.org/x/tools/cmd/goimports@latest
|
||||
|
||||
make fmt
|
||||
```
|
||||
|
||||
@@ -193,7 +191,6 @@ docker build -t test/flagger:latest .
|
||||
kind load docker-image test/flagger:latest
|
||||
```
|
||||
|
||||
|
||||
Run the Istio e2e tests:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -4,13 +4,28 @@ This document describes how to release Flagger.
|
||||
|
||||
## Release
|
||||
|
||||
### Flagger
|
||||
|
||||
To release a new Flagger version (e.g. `2.0.0`) follow these steps:
|
||||
|
||||
* create a branch `git checkout -b prep-2.0.0`
|
||||
* create a branch `git checkout -b release-2.0.0`
|
||||
* set the version in code and manifests `TAG=2.0.0 make version-set`
|
||||
* commit changes and merge PR
|
||||
* checkout master `git checkout main && git pull`
|
||||
* tag master `make release`
|
||||
* checkout main `git checkout main && git pull`
|
||||
* tag main `make release`
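
Taken together, the local steps above amount to roughly the following (a sketch for a hypothetical `2.0.0` release; the PR review and merge happen between setting the version and tagging):

```bash
# Sketch of the release flow above for a hypothetical 2.0.0 release.
git checkout -b release-2.0.0
TAG=2.0.0 make version-set
# commit the changes, open a PR and merge it, then:
git checkout main && git pull
make release
```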
|
||||
|
||||
### Flagger load tester
|
||||
|
||||
To release a new Flagger load tester version (e.g. `2.0.0`) follow these steps:
|
||||
|
||||
* create a branch `git checkout -b release-ld-2.0.0`
|
||||
* set the version in code (`cmd/loadtester/main.go#VERSION`)
|
||||
* set the version in the Helm chart (`charts/loadtester/Chart.yaml` and `values.yaml`)
|
||||
* set the version in manifests (`kustomize/tester/deployment.yaml`)
|
||||
* commit changes and push the branch upstream
|
||||
* in GitHub UI, navigate to Actions and run the `push-ld` workflow selecting the release branch
|
||||
* after the workflow finishes, open the PR which will run the e2e tests using the new tester version
|
||||
* merge the PR if the tests pass
|
||||
|
||||
## CI
|
||||
|
||||
@@ -18,7 +33,9 @@ After the tag has been pushed to GitHub, the CI release pipeline does the follow
|
||||
|
||||
* creates a GitHub release
|
||||
* pushes the Flagger binary and change log to GitHub release
|
||||
* pushes the Flagger container image to Docker Hub
|
||||
* pushes the Flagger container image to GitHub Container Registry
|
||||
* pushes the Flagger install manifests to GitHub Container Registry
|
||||
* signs all OCI artifacts and release assets with Cosign and GitHub OIDC
|
||||
* pushes the Helm chart to github-pages branch
|
||||
* GitHub pages publishes the new chart version on the Helm repository
|
||||
|
||||
@@ -32,3 +49,6 @@ After a Flagger release, publish the docs with:
|
||||
* `git checkout docs`
|
||||
* `git rebase main`
|
||||
* `git push origin docs`
|
||||
|
||||
Lastly open a PR with all the docs changes on [fluxcd/website](https://github.com/fluxcd/website) to
|
||||
update [fluxcd.io/flagger](https://fluxcd.io/flagger/).
|
||||
|
||||
@@ -49,11 +49,39 @@ spec:
|
||||
timestamp: "2020-03-10T14:24:48+0000"
|
||||
```
|
||||
|
||||
#### How to change replicas for a deployment when not using HPA?
|
||||
|
||||
To change replicas for a deployment when not using HPA, you have to update the canary deployment with the desired replica count
|
||||
and trigger an analysis by annotating the template. After the analysis finishes, Flagger will promote the `spec.replicas` changes to the primary deployment.
|
||||
|
||||
Example:
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
spec:
|
||||
replicas: 4 #update replicas
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
timestamp: "2022-02-10T14:24:48+0000" #add annotation to trigger analysis
|
||||
```
|
||||
|
||||
#### Why is there a window of downtime during the canary initializing process when analysis is disabled?
|
||||
|
||||
A window of downtime is the intended behavior when the analysis is disabled. This allows instant rollback and also mimics the way
|
||||
a Kubernetes deployment initialization works. To avoid this, enable the analysis (`skipAnalysis: true`), wait for the initialization
|
||||
to finish, and disable it afterward (`skipAnalysis: false`).
|
||||
a Kubernetes deployment initialization works. To avoid this, enable the analysis (`skipAnalysis: false`), wait for the initialization
|
||||
to finish, and disable it afterward (`skipAnalysis: true`).
|
||||
|
||||
#### How to disable cross namespace references?
|
||||
|
||||
Flagger by default can access resources across namespaces (`AlertProvider`, `MetricProvider` and Gloo `Upstream`).
|
||||
If you're in a multi-tenant environment and wish to disable this, you can do so through the `no-cross-namespace-refs` flag.
|
||||
|
||||
```
|
||||
flagger \
|
||||
-no-cross-namespace-refs=true \
|
||||
...
|
||||
```
|
||||
|
||||
## Kubernetes services
|
||||
|
||||
@@ -85,19 +113,19 @@ spec:
|
||||
|
||||
If the `service.name` is not specified, then `targetRef.name` is used for
|
||||
the apex domain and canary/primary services name prefix.
|
||||
You should treat the service name as an immutable field, changing it could result in routing conflicts.
|
||||
You should treat the service name as an immutable field; changing it could result in routing conflicts.
|
||||
|
||||
Based on the canary spec service, Flagger generates the following Kubernetes ClusterIP service:
|
||||
|
||||
* `<service.name>.<namespace>.svc.cluster.local`
|
||||
* `<service.name>.<namespace>.svc.cluster.local`
|
||||
|
||||
selector `app=<name>-primary`
|
||||
|
||||
* `<service.name>-primary.<namespace>.svc.cluster.local`
|
||||
* `<service.name>-primary.<namespace>.svc.cluster.local`
|
||||
|
||||
selector `app=<name>-primary`
|
||||
|
||||
* `<service.name>-canary.<namespace>.svc.cluster.local`
|
||||
* `<service.name>-canary.<namespace>.svc.cluster.local`
|
||||
|
||||
selector `app=<name>`
|
||||
|
||||
@@ -221,7 +249,14 @@ spec:
|
||||
```
|
||||
|
||||
Besides `app`, Flagger supports `name` and `app.kubernetes.io/name` selectors.
|
||||
If you use a different convention you can specify your label with the `-selector-labels` flag.
|
||||
If you use a different convention, you can specify your label with the `-selector-labels` flag.
|
||||
For example:
|
||||
|
||||
```
|
||||
flagger \
|
||||
-selector-labels=service,name,app.kubernetes.io/name \
|
||||
...
|
||||
```
|
||||
|
||||
#### Is pod affinity and anti affinity supported?
|
||||
|
||||
@@ -332,7 +367,7 @@ spec:
|
||||
|
||||
#### How does Flagger measure the request success rate and duration?
|
||||
|
||||
Flagger measures the request success rate and duration using Prometheus queries.
|
||||
By default, Flagger measures the request success rate and duration using Prometheus queries.
|
||||
|
||||
#### HTTP requests success rate percentage
|
||||
|
||||
@@ -356,20 +391,20 @@ sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="destination",
|
||||
destination_workload_namespace=~"$namespace",
|
||||
destination_workload=~"$workload",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}",
|
||||
response_code!~"5.*"
|
||||
}[$interval]
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="destination",
|
||||
destination_workload_namespace=~"$namespace",
|
||||
destination_workload=~"$workload"
|
||||
}[$interval]
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
```
|
||||
@@ -380,19 +415,19 @@ Envoy query (App Mesh):
|
||||
sum(
|
||||
rate(
|
||||
envoy_cluster_upstream_rq{
|
||||
kubernetes_namespace="$namespace",
|
||||
kubernetes_pod_name=~"$workload",
|
||||
kubernetes_namespace="{{ namespace }}",
|
||||
kubernetes_pod_name=~"{{ target }}",
|
||||
envoy_response_code!~"5.*"
|
||||
}[$interval]
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
envoy_cluster_upstream_rq{
|
||||
kubernetes_namespace="$namespace",
|
||||
kubernetes_pod_name=~"$workload"
|
||||
}[$interval]
|
||||
kubernetes_namespace="{{ namespace }}",
|
||||
kubernetes_pod_name=~"{{ target }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
```
|
||||
@@ -403,17 +438,17 @@ Envoy query (Contour and Gloo):
|
||||
sum(
|
||||
rate(
|
||||
envoy_cluster_upstream_rq{
|
||||
envoy_cluster_name=~"$namespace-$workload",
|
||||
envoy_cluster_name=~"{{ namespace }}-{{ target }}",
|
||||
envoy_response_code!~"5.*"
|
||||
}[$interval]
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
envoy_cluster_upstream_rq{
|
||||
envoy_cluster_name=~"$namespace-$workload",
|
||||
}[$interval]
|
||||
envoy_cluster_name=~"{{ namespace }}-{{ target }}",
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
```
|
||||
@@ -436,14 +471,14 @@ Spec:
|
||||
Istio query:
|
||||
|
||||
```javascript
|
||||
histogram_quantile(0.99,
|
||||
histogram_quantile(0.99,
|
||||
sum(
|
||||
irate(
|
||||
istio_request_duration_seconds_bucket{
|
||||
istio_request_duration_milliseconds_bucket{
|
||||
reporter="destination",
|
||||
destination_workload=~"$workload",
|
||||
destination_workload_namespace=~"$namespace"
|
||||
}[$interval]
|
||||
destination_workload=~"{{ target }}",
|
||||
destination_workload_namespace=~"{{ namespace }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) by (le)
|
||||
)
|
||||
@@ -452,13 +487,13 @@ histogram_quantile(0.99,
|
||||
Envoy query (App Mesh, Contour and Gloo):
|
||||
|
||||
```javascript
|
||||
histogram_quantile(0.99,
|
||||
histogram_quantile(0.99,
|
||||
sum(
|
||||
irate(
|
||||
envoy_cluster_upstream_rq_time_bucket{
|
||||
kubernetes_pod_name=~"$workload",
|
||||
kubernetes_namespace=~"$namespace"
|
||||
}[$interval]
|
||||
kubernetes_pod_name=~"{{ target }}",
|
||||
kubernetes_namespace=~"{{ namespace }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) by (le)
|
||||
)
|
||||
@@ -469,7 +504,34 @@ histogram_quantile(0.99,
|
||||
#### Can I use custom metrics?
|
||||
|
||||
The analysis can be extended with metrics provided by Prometheus, Datadog, AWS CloudWatch, New Relic and Graphite.
|
||||
For more details on how custom metrics can be used please read the [metrics docs](usage/metrics.md).
|
||||
For more details on how custom metrics can be used, please read the [metrics docs](usage/metrics.md).
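As a rough illustration (not the authoritative schema — see the metrics docs), a Prometheus-backed `MetricTemplate` might look like the sketch below; the name, namespace and Prometheus address are assumptions:

```bash
# Hedged sketch of a custom metric registered as a MetricTemplate;
# the metadata and the Prometheus address are placeholders.
kubectl apply -f - <<'EOF'
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: error-rate
  namespace: istio-system
spec:
  provider:
    type: prometheus
    address: http://prometheus.istio-system:9090
  query: |
    100 - sum(
      rate(istio_requests_total{
        reporter="destination",
        destination_workload_namespace=~"{{ namespace }}",
        destination_workload=~"{{ target }}",
        response_code!~"5.*"
      }[{{ interval }}])
    )
    /
    sum(
      rate(istio_requests_total{
        reporter="destination",
        destination_workload_namespace=~"{{ namespace }}",
        destination_workload=~"{{ target }}"
      }[{{ interval }}])
    ) * 100
EOF
```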
|
||||
|
||||
#### Istio Gateway API
|
||||
|
||||
If you're using Istio with Gateway API, the Prometheus query needs to include `reporter="source"`. For example, to calculate HTTP requests error percentage, the query would be:
|
||||
|
||||
```javascript
|
||||
100 - sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="source",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}",
|
||||
response_code!~"5.*"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="source",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) * 100
|
||||
```
|
||||
|
||||
## Istio routing
|
||||
|
||||
@@ -496,7 +558,7 @@ spec:
|
||||
portName: http-frontend
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
- mesh
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
@@ -538,7 +600,7 @@ spec:
|
||||
For the above spec Flagger will generate the following virtual service:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: frontend
|
||||
@@ -552,7 +614,7 @@ metadata:
|
||||
uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
- mesh
|
||||
hosts:
|
||||
- frontend.example.com
|
||||
@@ -591,7 +653,7 @@ spec:
|
||||
For each destination in the virtual service a rule is generated:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-primary
|
||||
@@ -602,7 +664,7 @@ spec:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-canary
|
||||
@@ -689,7 +751,7 @@ spec:
|
||||
Based on the above spec, Flagger will create the following virtual service:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: backend
|
||||
@@ -712,17 +774,17 @@ spec:
|
||||
weight: 0
|
||||
```
|
||||
|
||||
Therefore, The following virtual service forward the traffic to `/podinfo` by the above delegate VirtualService.
|
||||
Therefore, the following virtual service forwards the traffic to `/podinfo` by the above delegate VirtualService.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: frontend
|
||||
namespace: test
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
- mesh
|
||||
hosts:
|
||||
- frontend.example.com
|
||||
@@ -738,7 +800,7 @@ spec:
|
||||
namespace: test
|
||||
```
|
||||
|
||||
Note that pilot env `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
|
||||
For the use of Istio Delegation, you can refer to the documentation of
|
||||
[Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/#Delegate)
|
||||
and [pilot environment variables](https://istio.io/latest/docs/reference/commands/pilot-discovery/#envvars).
|
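A minimal sketch of one way to set that flag, assuming Istio is installed through the `IstioOperator` API (adapt this to your own installation method):

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    pilot:
      k8s:
        env:
          # enable VirtualService delegation in istiod
          - name: PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE
            value: "true"
```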
||||
@@ -747,8 +809,8 @@ and [pilot environment variables](https://istio.io/latest/docs/reference/command
|
||||
|
||||
#### How can I expose multiple canaries on the same external domain?
|
||||
|
||||
Assuming you have two apps, one that servers the main website and one that serves the REST API.
|
||||
For each app you can define a canary object as:
|
||||
Assuming you have two apps -- one that serves the main website and one that serves its REST API --
|
||||
you can define a canary object for each app as:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
@@ -759,7 +821,7 @@ spec:
|
||||
service:
|
||||
port: 8080
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
hosts:
|
||||
- my-site.com
|
||||
match:
|
||||
@@ -776,7 +838,7 @@ spec:
|
||||
service:
|
||||
port: 8080
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
hosts:
|
||||
- my-site.com
|
||||
match:
|
||||
@@ -792,7 +854,7 @@ Istio Pilot will
|
||||
[merge](https://istio.io/help/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host)
|
||||
the two services and the website rule will be moved to the end of the list in the merged configuration.
|
||||
|
||||
Note that host merging only works if the canaries are bounded to a ingress gateway other than the `mesh` gateway.
|
||||
Note that host merging only works if the canaries are bound to an ingress gateway other than the `mesh` gateway.
|
||||
|
||||
## Istio Mutual TLS
|
||||
|
||||
@@ -810,7 +872,7 @@ spec:
|
||||
mode: ISTIO_MUTUAL
|
||||
```
|
||||
|
||||
If you run Istio in permissive mode you can disable TLS:
|
||||
If you run Istio in permissive mode, you can disable TLS:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
@@ -851,3 +913,42 @@ spec:
|
||||
mtls:
|
||||
mode: DISABLE
|
||||
```
|
||||
|
||||
## ExternalDNS
|
||||
|
||||
### Can I use annotations?
|
||||
|
||||
Flagger propagates annotations (and labels) to all the generated apex,
|
||||
primary and canary objects. This allows using external-dns annotations.
|
||||
|
||||
You can configure Flagger to set annotations with:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
service:
|
||||
apex:
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: "mydomain.com"
|
||||
primary:
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: "primary.mydomain.com"
|
||||
canary:
|
||||
annotations:
|
||||
external-dns.alpha.kubernetes.io/hostname: "canary.mydomain.com"
|
||||
```
|
||||
|
||||
### Multiple sources and Istio
|
||||
|
||||
**/!\\** The apex annotations are added to both the generated Kubernetes Services and the generated Istio
|
||||
VirtualServices objects. If you have configured external-dns to use both sources,
|
||||
this will create conflicts!
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
containers:
|
||||
args:
|
||||
- --source=service # choose only one
|
||||
- --source=istio-virtualservice # of these two
|
||||
```
|
||||
|
||||
[Check out the ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/istio.md)
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
# Flagger Install on Alibaba ServiceMesh
|
||||
|
||||
This guide walks you through setting up Flagger on Alibaba ServiceMesh.
|
||||
|
||||
## Prerequisites
|
||||
- Created an ACK ([Alibabacloud Container Service for Kubernetes](https://cs.console.aliyun.com)) cluster instance.
|
||||
- Created an ASM ([Alibaba ServiceMesh](https://servicemesh.console.aliyun.com)) instance and added the ACK cluster to it.
|
||||
|
||||
### Variables declaration
|
||||
- `$ACK_CONFIG`: the kubeconfig file path of ACK, which is treated as `$HOME/.kube/config` in the rest of this guide.
|
||||
- `$MESH_CONFIG`: the kubeconfig file path of ASM.
|
||||
- `$ISTIO_RELEASE`: see https://github.com/istio/istio/releases
|
||||
- `$FLAGGER_SRC`: see https://github.com/fluxcd/flagger
|
||||
|
||||
## Install Prometheus
|
||||
Install Prometheus:
|
||||
|
||||
```bash
|
||||
kubectl apply -f $ISTIO_RELEASE/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
This is equivalent to the command below:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig "$ACK_CONFIG" apply -f $ISTIO_RELEASE/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
Append the configs below to `scrape_configs` in the Prometheus ConfigMap to support telemetry:
|
||||
```yaml
|
||||
scrape_configs:
|
||||
# Mixer scraping. Defaults to Prometheus and Mixer in the same namespace.
|
||||
- job_name: 'istio-mesh'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;prometheus
|
||||
# Scrape config for envoy stats
|
||||
- job_name: 'envoy-stats'
|
||||
metrics_path: /stats/prometheus
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_container_port_name]
|
||||
action: keep
|
||||
regex: '.*-envoy-prom'
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:15090
|
||||
target_label: __address__
|
||||
- action: labeldrop
|
||||
regex: __meta_kubernetes_pod_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_namespace]
|
||||
action: replace
|
||||
target_label: namespace
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
action: replace
|
||||
target_label: pod_name
|
||||
- job_name: 'istio-policy'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-policy;http-policy-monitoring
|
||||
- job_name: 'istio-telemetry'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-telemetry;http-monitoring
|
||||
- job_name: 'pilot'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istiod;http-monitoring
|
||||
- source_labels: [__meta_kubernetes_service_label_app]
|
||||
target_label: app
|
||||
- job_name: 'sidecar-injector'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
namespaces:
|
||||
names:
|
||||
- istio-system
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: istio-sidecar-injector;http-monitoring
|
||||
```
|
||||
|
||||
## Install Flagger
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
helm repo update
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f $FLAGGER_SRC/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger for Alibaba ServiceMesh:
|
||||
|
||||
```bash
|
||||
cp $MESH_CONFIG kubeconfig
|
||||
kubectl -n istio-system create secret generic istio-kubeconfig --from-file kubeconfig
|
||||
kubectl -n istio-system label secret istio-kubeconfig istio/multiCluster=true
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://prometheus:9090 \
|
||||
--set istio.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set istio.kubeconfig.key=kubeconfig
|
||||
```
|
||||
@@ -1,151 +0,0 @@
|
||||
# Flagger Install on EKS App Mesh
|
||||
|
||||
This guide walks you through setting up Flagger and AWS App Mesh on EKS.
|
||||
|
||||
## App Mesh
|
||||
|
||||
The App Mesh integration with EKS is made out of the following components:
|
||||
|
||||
* Kubernetes custom resources
|
||||
* `mesh.appmesh.k8s.aws` defines a logical boundary for network traffic between the services
|
||||
* `virtualnode.appmesh.k8s.aws` defines a logical pointer to a Kubernetes workload
|
||||
* `virtualservice.appmesh.k8s.aws` defines the routing rules for a workload inside the mesh
|
||||
* CRD controller - keeps the custom resources in sync with the App Mesh control plane
|
||||
* Admission controller - injects the Envoy sidecar and assigns Kubernetes pods to App Mesh virtual nodes
|
||||
* Telemetry service - Prometheus instance that collects and stores Envoy's metrics
|
||||
|
||||
## Create a Kubernetes cluster
|
||||
|
||||
In order to create an EKS cluster you can use [eksctl](https://eksctl.io).
|
||||
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon.
|
||||
|
||||
On MacOS you can install eksctl with Homebrew:
|
||||
|
||||
```bash
|
||||
brew tap weaveworks/tap
|
||||
brew install weaveworks/tap/eksctl
|
||||
```
|
||||
|
||||
Create an EKS cluster with:
|
||||
|
||||
```bash
|
||||
eksctl create cluster --name=appmesh \
|
||||
--region=us-west-2 \
|
||||
--nodes 3 \
|
||||
--node-volume-size=120 \
|
||||
--appmesh-access
|
||||
```
|
||||
|
||||
The above command will create a three-node cluster with the
|
||||
App Mesh [IAM policy](https://docs.aws.amazon.com/app-mesh/latest/userguide/MESH_IAM_user_policies.html)
|
||||
attached to the EKS node instance role.
|
||||
|
||||
Verify the install with:
|
||||
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
## Install Helm
|
||||
|
||||
Install the [Helm](https://docs.helm.sh/using_helm/#installing-helm) v3 command-line tool:
|
||||
|
||||
```text
|
||||
brew install helm
|
||||
```
|
||||
|
||||
Add the EKS repository to Helm:
|
||||
|
||||
```bash
|
||||
helm repo add eks https://aws.github.io/eks-charts
|
||||
```
|
||||
|
||||
## Enable horizontal pod auto-scaling
|
||||
|
||||
Install the Horizontal Pod Autoscaler (HPA) metrics provider:
|
||||
|
||||
```bash
|
||||
helm upgrade -i metrics-server stable/metrics-server \
|
||||
--namespace kube-system \
|
||||
--set args[0]=--kubelet-preferred-address-types=InternalIP
|
||||
```
|
||||
|
||||
After a minute, the metrics API should report CPU and memory usage for pods. You can verify the metrics API with:
|
||||
|
||||
```bash
|
||||
kubectl -n kube-system top pods
|
||||
```
|
||||
|
||||
## Install the App Mesh components
|
||||
|
||||
Install the App Mesh CRDs:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/aws/eks-charts/stable/appmesh-controller//crds?ref=master
|
||||
```
|
||||
|
||||
Create the `appmesh-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns appmesh-system
|
||||
```
|
||||
|
||||
Install the App Mesh controller:
|
||||
|
||||
```bash
|
||||
helm upgrade -i appmesh-controller eks/appmesh-controller \
|
||||
--wait --namespace appmesh-system
|
||||
```
|
||||
|
||||
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
|
||||
you'll need to set up a Prometheus instance to scrape the Envoy sidecars.
|
||||
|
||||
Install the App Mesh Prometheus:
|
||||
|
||||
```bash
|
||||
helm upgrade -i appmesh-prometheus eks/appmesh-prometheus \
|
||||
--wait --namespace appmesh-system
|
||||
```
|
||||
|
||||
## Install Flagger
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger in the _**appmesh-system**_ namespace:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=appmesh-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=appmesh:v1beta2 \
|
||||
--set metricsServer=http://appmesh-prometheus:9090
|
||||
```
|
||||
|
||||
## Install Grafana
|
||||
|
||||
Deploy App Mesh Grafana that comes with a dashboard for monitoring Flagger's canary releases:
|
||||
|
||||
```bash
|
||||
helm upgrade -i appmesh-grafana eks/appmesh-grafana \
|
||||
--namespace appmesh-system
|
||||
```
|
||||
|
||||
You can access Grafana using port forwarding:
|
||||
|
||||
```bash
|
||||
kubectl -n appmesh-system port-forward svc/appmesh-grafana 3000:3000
|
||||
```
|
||||
|
||||
Now that you have Flagger running, you can try the
|
||||
[App Mesh canary deployments tutorial](https://docs.flagger.app/usage/appmesh-progressive-delivery).
|
||||
|
||||
@@ -1,400 +0,0 @@
|
||||
# Flagger Install on GKE Istio
|
||||
|
||||
This guide walks you through setting up Flagger and Istio on Google Kubernetes Engine.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
You will be creating a cluster on Google’s Kubernetes Engine \(GKE\), if you don’t have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
|
||||
|
||||
Login into Google Cloud, create a project and enable billing for it.
|
||||
|
||||
Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.
|
||||
|
||||
Set the default project \(replace `PROJECT_ID` with your own project\):
|
||||
|
||||
```text
|
||||
gcloud config set project PROJECT_ID
|
||||
```
|
||||
|
||||
Set the default compute region and zone:
|
||||
|
||||
```text
|
||||
gcloud config set compute/region us-central1
|
||||
gcloud config set compute/zone us-central1-a
|
||||
```
|
||||
|
||||
Enable the Kubernetes and Cloud DNS services for your project:
|
||||
|
||||
```text
|
||||
gcloud services enable container.googleapis.com
|
||||
gcloud services enable dns.googleapis.com
|
||||
```
|
||||
|
||||
Install the kubectl command-line tool:
|
||||
|
||||
```text
|
||||
gcloud components install kubectl
|
||||
```
|
||||
|
||||
## GKE cluster setup
|
||||
|
||||
Create a cluster with the Istio add-on:
|
||||
|
||||
```bash
|
||||
K8S_VERSION=$(gcloud container get-server-config --format=json \
|
||||
| jq -r '.validMasterVersions[0]')
|
||||
|
||||
gcloud beta container clusters create istio \
|
||||
--cluster-version=${K8S_VERSION} \
|
||||
--zone=us-central1-a \
|
||||
--num-nodes=2 \
|
||||
--machine-type=n1-highcpu-4 \
|
||||
--preemptible \
|
||||
--no-enable-cloud-logging \
|
||||
--no-enable-cloud-monitoring \
|
||||
--disk-size=30 \
|
||||
--enable-autorepair \
|
||||
--addons=HorizontalPodAutoscaling,Istio \
|
||||
--istio-config=auth=MTLS_PERMISSIVE
|
||||
```
|
||||
|
||||
The above command will create a default node pool consisting of two `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\) preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced after a maximum of 24 hours.
|
||||
|
||||
Set up credentials for `kubectl`:
|
||||
|
||||
```bash
|
||||
gcloud container clusters get-credentials istio
|
||||
```
|
||||
|
||||
Create a cluster admin role binding:
|
||||
|
||||
```bash
|
||||
kubectl create clusterrolebinding "cluster-admin-$(whoami)" \
|
||||
--clusterrole=cluster-admin \
|
||||
--user="$(gcloud config get-value core/account)"
|
||||
```
|
||||
|
||||
Validate your setup with:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system get svc
|
||||
```
|
||||
|
||||
In a couple of seconds GCP should allocate an external IP to the `istio-ingressgateway` service.
|
||||
|
||||
## Cloud DNS setup
|
||||
|
||||
You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
|
||||
|
||||
Create a managed zone named `istio` in Cloud DNS \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
gcloud dns managed-zones create \
|
||||
--dns-name="example.com." \
|
||||
--description="Istio zone" "istio"
|
||||
```
|
||||
|
||||
Look up your zone's name servers:
|
||||
|
||||
```bash
|
||||
gcloud dns managed-zones describe istio
|
||||
```
|
||||
|
||||
Update your registrar's name server records with the records returned by the above command.
|
||||
|
||||
Wait for the name servers to change \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
watch dig +short NS example.com
|
||||
```
|
||||
|
||||
Create a static IP address named `istio-gateway` using the Istio ingress IP:
|
||||
|
||||
```bash
|
||||
export GATEWAY_IP=$(kubectl -n istio-system get svc/istio-ingressgateway -ojson \
|
||||
| jq -r .status.loadBalancer.ingress[0].ip)
|
||||
|
||||
gcloud compute addresses create istio-gateway --addresses ${GATEWAY_IP} --region us-central1
|
||||
```
|
||||
|
||||
Create the following DNS records \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
DOMAIN="example.com"
|
||||
|
||||
gcloud dns record-sets transaction start --zone=istio
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAY_IP}
|
||||
|
||||
gcloud dns record-sets transaction execute --zone istio
|
||||
```
|
||||
|
||||
Verify that the wildcard DNS is working \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
watch host test.example.com
|
||||
```
|
||||
|
||||
## Install Helm
|
||||
|
||||
Install the [Helm](https://docs.helm.sh/using_helm/#installing-helm) command-line tool:
|
||||
|
||||
```text
|
||||
brew install kubernetes-helm
|
||||
```
|
||||
|
||||
Create a service account and a cluster role binding for Tiller:
|
||||
|
||||
```bash
|
||||
kubectl -n kube-system create sa tiller
|
||||
|
||||
kubectl create clusterrolebinding tiller-cluster-rule \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:tiller
|
||||
```
|
||||
|
||||
Deploy Tiller in the `kube-system` namespace:
|
||||
|
||||
```bash
|
||||
helm init --service-account tiller
|
||||
```
|
||||
|
||||
You should consider using SSL between Helm and Tiller; for more information on securing your Helm installation, see [docs.helm.sh](https://docs.helm.sh/using_helm/#securing-your-helm-installation).
|
||||
|
||||
## Install cert-manager
|
||||
|
||||
Jetstack's [cert-manager](https://github.com/jetstack/cert-manager) is a Kubernetes operator that automatically creates and manages TLS certs issued by Let’s Encrypt.
|
||||
|
||||
You'll be using cert-manager to provision a wildcard certificate for the Istio ingress gateway.
|
||||
|
||||
Install cert-manager's CRDs:
|
||||
|
||||
```bash
|
||||
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager
|
||||
|
||||
kubectl apply -f ${CERT_REPO}/release-0.10/deploy/manifests/00-crds.yaml
|
||||
```
|
||||
|
||||
Create the cert-manager namespace and disable resource validation:
|
||||
|
||||
```bash
|
||||
kubectl create namespace cert-manager
|
||||
|
||||
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
|
||||
```
|
||||
|
||||
Install cert-manager with Helm:
|
||||
|
||||
```bash
|
||||
helm repo add jetstack https://charts.jetstack.io && \
|
||||
helm repo update && \
|
||||
helm upgrade -i cert-manager \
|
||||
--namespace cert-manager \
|
||||
--version v0.10.0 \
|
||||
jetstack/cert-manager
|
||||
```
|
||||
|
||||
## Istio Gateway TLS setup
|
||||
|
||||

|
||||
|
||||
Create a generic Istio Gateway to expose services outside the mesh on HTTPS:
|
||||
|
||||
```bash
|
||||
REPO=https://raw.githubusercontent.com/fluxcd/flagger/main
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/gke/istio-gateway.yaml
|
||||
```
|
||||
|
||||
Create a service account with Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):
|
||||
|
||||
```bash
|
||||
GCP_PROJECT=my-gcp-project
|
||||
|
||||
gcloud iam service-accounts create dns-admin \
|
||||
--display-name=dns-admin \
|
||||
--project=${GCP_PROJECT}
|
||||
|
||||
gcloud iam service-accounts keys create ./gcp-dns-admin.json \
|
||||
--iam-account=dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
|
||||
--project=${GCP_PROJECT}
|
||||
|
||||
gcloud projects add-iam-policy-binding ${GCP_PROJECT} \
|
||||
--member=serviceAccount:dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
|
||||
--role=roles/dns.admin
|
||||
```
|
||||
|
||||
Create a Kubernetes secret with the GCP Cloud DNS admin key:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cert-manager-credentials \
|
||||
--from-file=./gcp-dns-admin.json \
|
||||
--namespace=istio-system
|
||||
```
|
||||
|
||||
Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and `my-gcp-project` with your project ID\):
|
||||
|
||||
```yaml
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
namespace: istio-system
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: email@example.com
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod
|
||||
dns01:
|
||||
providers:
|
||||
- name: cloud-dns
|
||||
clouddns:
|
||||
serviceAccountSecretRef:
|
||||
name: cert-manager-credentials
|
||||
key: gcp-dns-admin.json
|
||||
project: my-gcp-project
|
||||
```
|
||||
|
||||
Save the above resource as letsencrypt-issuer.yaml and then apply it:
|
||||
|
||||
```text
|
||||
kubectl apply -f ./letsencrypt-issuer.yaml
|
||||
```
|
||||
|
||||
Create a wildcard certificate \(replace `example.com` with your domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: certmanager.k8s.io/v1alpha1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: istio-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
secretName: istio-ingressgateway-certs
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
commonName: "*.example.com"
|
||||
acme:
|
||||
config:
|
||||
- dns01:
|
||||
provider: cloud-dns
|
||||
domains:
|
||||
- "*.example.com"
|
||||
- "example.com"
|
||||
```
|
||||
|
||||
Save the above resource as istio-gateway-cert.yaml and then apply it:
|
||||
|
||||
```text
|
||||
kubectl apply -f ./istio-gateway-cert.yaml
|
||||
```
|
||||
|
||||
In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:
|
||||
|
||||
```text
|
||||
kubectl -n istio-system describe certificate istio-gateway
|
||||
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal CertIssued 1m52s cert-manager Certificate issued successfully
|
||||
```
|
||||
|
||||
Recreate Istio ingress gateway pods:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system delete pods -l istio=ingressgateway
|
||||
```
|
||||
|
||||
Note that the Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal. Since the GKE cluster is made out of preemptible VMs, the gateway pods will be replaced once every 24 hours; if you're not using preemptible nodes, you need to manually delete the gateway pods every two months, before the certificate expires.
|
||||
|
||||
## Install Prometheus
|
||||
|
||||
The GKE Istio add-on does not include a Prometheus instance that scrapes the Istio telemetry service. Because Flagger uses the Istio HTTP metrics to run the canary analysis, you have to deploy the following Prometheus configuration, which is similar to the one that comes with the official Istio Helm chart.
|
||||
|
||||
Find the GKE Istio version with:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system get deploy istio-pilot -oyaml | grep image:
|
||||
```
|
||||
|
||||
Install Prometheus in istio-system namespace:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system apply -f \
|
||||
https://storage.googleapis.com/gke-release/istio/release/1.0.6-gke.3/patches/install-prometheus.yaml
|
||||
```
|
||||
|
||||
## Install Flagger and Grafana
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
```
|
||||
|
||||
Install Flagger's Canary CRD:
|
||||
|
||||
```yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
|
||||
```
|
||||
|
||||
Deploy Flagger in the `istio-system` namespace with Slack notifications enabled:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=istio-system \
|
||||
--set crd.create=false \
|
||||
--set metricsServer=http://prometheus.istio-system:9090 \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
Deploy Grafana in the `istio-system` namespace:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-grafana flagger/grafana \
|
||||
--namespace=istio-system \
|
||||
--set url=http://prometheus.istio-system:9090 \
|
||||
--set user=admin \
|
||||
--set password=replace-me
|
||||
```
|
||||
|
||||
Expose Grafana through the public gateway by creating a virtual service \(replace `example.com` with your domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: grafana
|
||||
namespace: istio-system
|
||||
spec:
|
||||
hosts:
|
||||
- "grafana.example.com"
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
http:
|
||||
- route:
|
||||
- destination:
|
||||
host: flagger-grafana
|
||||
```
|
||||
|
||||
Save the above resource as grafana-virtual-service.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./grafana-virtual-service.yaml
|
||||
```
|
||||
|
||||
Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
# Flagger Install on Kubernetes
|
||||
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster with Helm v3 or Kustomize.
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster with Helm or Kubectl.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer.
|
||||
See the [Flux install guide](flagger-install-with-flux.md) for installing Flagger and keeping it up to date the GitOps way.
|
||||
|
||||
## Install Flagger with Helm
|
||||
|
||||
@@ -43,8 +41,8 @@ helm upgrade -i flagger flagger/flagger \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=istio \
|
||||
--set metricsServer=http://istio-cluster-prometheus:9090 \
|
||||
--set istio.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set istio.kubeconfig.key=kubeconfig
|
||||
--set controlplane.kubeconfig.secretName=istio-kubeconfig \
|
||||
--set controlplane.kubeconfig.key=kubeconfig
|
||||
```
|
||||
|
||||
Note that the Istio kubeconfig must be stored in a Kubernetes secret with a data key named `kubeconfig`.
|
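A minimal sketch of creating that secret, assuming the Istio control plane kubeconfig is saved locally as a file named `kubeconfig`:

```bash
# the data key must be named "kubeconfig"
kubectl -n istio-system create secret generic istio-kubeconfig \
  --from-file=kubeconfig=./kubeconfig
```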
||||
@@ -61,39 +59,25 @@ helm upgrade -i flagger flagger/flagger \
|
||||
--set metricsServer=http://linkerd-prometheus:9090
|
||||
```
|
||||
|
||||
Deploy Flagger for App Mesh:
|
||||
If you need to add labels to the flagger deployment or pods, you can pass the labels as parameters as shown below.
|
||||
|
||||
```bash
|
||||
```console
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace=appmesh-system \
|
||||
--set crd.create=false \
|
||||
--set meshProvider=appmesh \
|
||||
--set metricsServer=http://appmesh-prometheus:9090
|
||||
<other parameters> \
|
||||
--set podLabels.<labelName>=<labelValue> \
|
||||
--set deploymentLabels.<labelName>=<labelValue>
|
||||
```
|
||||
|
||||
You can install Flagger in any namespace as long as it can talk to the Prometheus service on port 9090.
|
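For example, a sketch of installing Flagger in a hypothetical `monitoring` namespace and pointing it at a Prometheus instance running there:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=monitoring \
  --set crd.create=false \
  --set meshProvider=istio \
  --set metricsServer=http://prometheus.monitoring:9090
```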
||||
|
||||
For ingress controllers, the install instructions are:
|
||||
For ingress controllers, the installation instructions are:
|
||||
|
||||
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
|
||||
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
|
||||
* [NGINX](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
|
||||
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
|
||||
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
|
||||
|
||||
You can use the helm template command and apply the generated yaml with kubectl:
|
||||
|
||||
```bash
|
||||
# generate
|
||||
helm fetch --untar --untardir . flagger/flagger &&
|
||||
helm template flagger ./flagger \
|
||||
--namespace=istio-system \
|
||||
--set metricsServer=http://prometheus.istio-system:9090 \
|
||||
> flagger.yaml
|
||||
|
||||
# apply
|
||||
kubectl apply -f flagger.yaml
|
||||
```
|
||||
* [APISIX](https://docs.flagger.app/tutorials/apisix-progressive-delivery)
|
||||
|
||||
To uninstall the Flagger release with Helm run:
|
||||
|
||||
@@ -106,7 +90,7 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
> **Note** that on uninstall the Canary CRD will not be removed. Deleting the CRD will make Kubernetes
|
||||
> remove all the objects owned by Flagger like Istio virtual services, Kubernetes deployments and ClusterIP services.
|
||||
|
||||
If you want to remove all the objects created by Flagger you have delete the Canary CRD with kubectl:
|
||||
If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:
|
||||
|
||||
```text
|
||||
kubectl delete crd canaries.flagger.app
|
||||
@@ -126,65 +110,18 @@ helm upgrade -i flagger-grafana flagger/grafana \
|
||||
--set password=change-me
|
||||
```
|
||||
|
||||
Or use helm template command and apply the generated yaml with kubectl:
|
||||
|
||||
```bash
|
||||
# generate
|
||||
helm fetch --untar --untardir . flagger/grafana &&
|
||||
helm template flagger-grafana ./grafana \
|
||||
--namespace=istio-system \
|
||||
> flagger-grafana.yaml
|
||||
|
||||
# apply
|
||||
kubectl apply -f flagger-grafana.yaml
|
||||
```
|
||||
|
||||
You can access Grafana using port forwarding:
|
||||
|
||||
```bash
|
||||
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
|
||||
```
|
||||
|
||||
## Install Flagger with Kustomize
|
||||
## Install Flagger with Kubectl
|
||||
|
||||
As an alternative to Helm, Flagger can be installed with Kustomize **3.5.0** or newer.
|
||||
|
||||
**Service mesh specific installers**
|
||||
|
||||
Install Flagger for Istio:
|
||||
Install Flagger and Prometheus using the Kustomize overlay from the GitHub repository:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/istio?ref=main | kubectl apply -f -
|
||||
```
|
||||
|
||||
Install Flagger for AWS App Mesh:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/appmesh?ref=main | kubectl apply -f -
|
||||
```
|
||||
|
||||
This deploys Flagger and sets the metrics server URL to App Mesh's Prometheus instance.
|
||||
|
||||
Install Flagger for Linkerd:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=main | kubectl apply -f -
|
||||
```
|
||||
|
||||
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
|
||||
|
||||
If you want to install a specific Flagger release, add the version number to the URL:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=v1.0.0 | kubectl apply -f -
|
||||
```
|
||||
|
||||
**Generic installer**
|
||||
|
||||
Install Flagger and Prometheus for Contour, Gloo, NGINX, Skipper, or Traefik ingress:
|
||||
|
||||
```bash
|
||||
kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main | kubectl apply -f -
|
||||
kubectl apply -k https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main
|
||||
```
|
||||
|
||||
This deploys Flagger and Prometheus in the `flagger-system` namespace,
|
||||
@@ -193,20 +130,6 @@ sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` a
|
||||
The Prometheus instance has a data retention of two hours and is configured to scrape all pods in your cluster
|
||||
that have the `prometheus.io/scrape: "true"` annotation.
|
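As an illustrative sketch, a workload opts in to scraping through its pod template annotations (the metrics port below is an assumption; use your app's own port):

```yaml
spec:
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9797"
```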
||||
|
||||
To target a different provider you can specify it in the canary custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: app
|
||||
namespace: test
|
||||
spec:
|
||||
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: nginx
|
||||
```
|
||||
|
||||
**Customized installer**
|
||||
|
||||
Create a kustomization file using Flagger as base and patch the container args:
|
||||
|
||||
docs/gitbook/install/flagger-install-with-flux.md (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
# Flagger Install on Kubernetes with Flux
|
||||
|
||||
This guide walks you through setting up Flagger on a Kubernetes cluster the GitOps way.
|
||||
You'll configure Flux to scan the Flagger OCI artifacts and deploy the
|
||||
latest stable version on Kubernetes.
|
||||
|
||||
## Flagger OCI artifacts
|
||||
|
||||
Flagger OCI artifacts (container images, Helm charts, Kustomize overlays) are published to
|
||||
GitHub Container Registry, and they are signed with Cosign at every release.
|
||||
|
||||
OCI artifacts
|
||||
|
||||
- `ghcr.io/fluxcd/flagger:<version>` multi-arch container images
|
||||
- `ghcr.io/fluxcd/flagger-manifest:<version>` Kubernetes manifests
|
||||
- `ghcr.io/fluxcd/charts/flagger:<version>` Helm charts
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To follow this guide you’ll need a Kubernetes cluster with Flux installed on it.
|
||||
Please see the Flux [get started guide](https://fluxcd.io/flux/get-started/)
|
||||
or the Flux [installation guide](https://fluxcd.io/flux/installation/).
|
||||
|
||||
## Deploy Flagger with Flux
|
||||
|
||||
First define the namespace where Flagger will be installed:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: flagger-system
|
||||
labels:
|
||||
toolkit.fluxcd.io/tenant: sre-team
|
||||
```
|
||||
|
||||
Define a Flux `OCIRepository` that points to where the Flagger Helm charts are stored:
|
||||
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: OCIRepository
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: flagger-system
|
||||
spec:
|
||||
interval: 1h
|
||||
url: oci://ghcr.io/fluxcd/charts/flagger
|
||||
layerSelector:
|
||||
mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip"
|
||||
operation: copy
|
||||
ref:
|
||||
semver: "1.x" # update to the latest version
|
||||
```
|
||||
|
||||
Define a Flux `HelmRelease` that verifies and installs Flagger's latest version on the cluster:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: flagger
|
||||
namespace: flagger-system
|
||||
spec:
|
||||
interval: 12h
|
||||
releaseName: flagger
|
||||
install: # override existing Flagger CRDs
|
||||
crds: CreateReplace
|
||||
upgrade: # update Flagger CRDs
|
||||
crds: CreateReplace
|
||||
chartRef:
|
||||
kind: OCIRepository
|
||||
name: flagger
|
||||
values:
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
```
|
||||
|
||||
Copy the above manifests into a file called `flagger.yaml`, place the YAML file
|
||||
in the Git repository bootstrapped with Flux, then commit and push it to upstream.
|
||||
|
||||
After Flux reconciles the changes on your cluster, you can check if Flagger got deployed with:
|
||||
|
||||
```console
|
||||
$ helm list -n flagger-system
|
||||
NAME NAMESPACE REVISION STATUS CHART APP VERSION
|
||||
flagger flagger-system 1 deployed flagger-1.42.0 1.42.0
|
||||
```
|
||||
|
||||
To uninstall Flagger, delete the `flagger.yaml` from your repository; Flux will then uninstall
|
||||
the Helm release and will remove the namespace from your cluster.
|
||||
|
||||
## Deploy Flagger load tester with Flux
|
||||
|
||||
Flagger comes with a load testing service that generates traffic during analysis when configured as a webhook.
|
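A minimal sketch of wiring the load tester into a canary analysis as a webhook (assuming the load tester and a `podinfo` canary service on port 9898 both live in the `apps` namespace):

```yaml
  analysis:
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.apps/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.apps:9898/"
```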
||||
|
||||
The load tester container images and deployment manifests are published to GitHub Container Registry.
|
||||
The container images and the manifests are signed with Cosign and GitHub Actions OIDC.
|
||||
|
||||
Assuming the applications managed by Flagger are in the `apps` namespace, you can configure Flux to
|
||||
deploy the load tester there.
|
||||
|
||||
Define a Flux `OCIRepository` that points to where the Flagger Kustomize overlays are stored:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1
|
||||
kind: OCIRepository
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: apps
|
||||
spec:
|
||||
interval: 6h # scan for new versions every six hours
|
||||
url: oci://ghcr.io/fluxcd/flagger-manifests
|
||||
ref:
|
||||
semver: "*" # update to the latest version
|
||||
```
|
||||
|
||||
Define a Flux `Kustomization` that deploys the Flagger load tester to the `apps` namespace:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: flagger-loadtester
|
||||
namespace: apps
|
||||
spec:
|
||||
targetNamespace: apps
|
||||
interval: 6h
|
||||
wait: true
|
||||
timeout: 5m
|
||||
prune: true
|
||||
sourceRef:
|
||||
kind: OCIRepository
|
||||
name: flagger-loadtester
|
||||
path: ./tester
|
||||
```
|
||||
|
||||
Copy the above manifests into a file called `flagger-loadtester.yaml`, place the YAML file
|
||||
in the Git repository bootstrapped with Flux, then commit and push it to upstream.
|
||||
|
||||
After Flux reconciles the changes on your cluster, you can check if the load tester got deployed with:
|
||||
|
||||
```console
|
||||
$ flux -n apps get kustomization flagger-loadtester
|
||||
NAME READY MESSAGE
|
||||
flagger-loadtester True Applied revision: v1.23.0/a80af71e001
|
||||
```
|
||||
|
||||
To uninstall the load tester, delete the `flagger-loadtester.yaml` from your repository,
|
||||
and Flux will delete the load tester deployment from the cluster.
|
||||
docs/gitbook/tutorials/apisix-progressive-delivery.md (new file, 351 lines)
@@ -0,0 +1,351 @@
|
||||
# Apache APISIX Canary Deployments
|
||||
|
||||
This guide shows you how to use the [Apache APISIX](https://apisix.apache.org/) and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer, Apache APISIX **v2.15** or newer, and Apache APISIX Ingress Controller **v1.5.0** or newer.
|
||||
|
||||
Install Apache APISIX and Apache APISIX Ingress Controller with Helm v3:
|
||||
|
||||
```bash
|
||||
helm repo add apisix https://charts.apiseven.com
|
||||
kubectl create ns apisix
|
||||
|
||||
helm upgrade -i apisix apisix/apisix --version=0.11.3 \
|
||||
--namespace apisix \
|
||||
--set apisix.podAnnotations."prometheus\.io/scrape"=true \
|
||||
--set apisix.podAnnotations."prometheus\.io/port"=9091 \
|
||||
--set apisix.podAnnotations."prometheus\.io/path"=/apisix/prometheus/metrics \
|
||||
--set pluginAttrs.prometheus.export_addr.ip=0.0.0.0 \
|
||||
--set pluginAttrs.prometheus.export_addr.port=9091 \
|
||||
--set pluginAttrs.prometheus.export_uri=/apisix/prometheus/metrics \
|
||||
--set pluginAttrs.prometheus.metric_prefix=apisix_ \
|
||||
--set ingress-controller.enabled=true \
|
||||
--set ingress-controller.config.apisix.serviceNamespace=apisix
|
||||
```
|
||||
|
||||
Install Flagger and the Prometheus add-on in the same namespace as Apache APISIX:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace apisix \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=apisix
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and an ApisixRoute\). These objects expose the application outside the cluster and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create an Apache APISIX `ApisixRoute` that Flagger will reference and from which it will generate the canary `ApisixRoute` \(replace `app.example.com` with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: apisix.apache.org/v2
|
||||
kind: ApisixRoute
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
http:
|
||||
- backends:
|
||||
- serviceName: podinfo
|
||||
servicePort: 80
|
||||
match:
|
||||
hosts:
|
||||
- app.example.com
|
||||
methods:
|
||||
- GET
|
||||
paths:
|
||||
- /*
|
||||
name: method
|
||||
plugins:
|
||||
- name: prometheus
|
||||
enable: true
|
||||
config:
|
||||
disable: false
|
||||
prefer_name: true
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-apisixroute.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-apisixroute.yaml
|
||||
```
|
||||
|
||||
Create a canary custom resource \(replace `app.example.com` with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: apisix
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# apisix route reference
|
||||
routeRef:
|
||||
apiVersion: apisix.apache.org/v2
|
||||
kind: ApisixRoute
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
  # to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# ClusterIP port number
|
||||
port: 80
|
||||
# container port number or name
|
||||
targetPort: 9898
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 10
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
# APISIX Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# builtin Prometheus check
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
type: rollout
|
||||
metadata:
|
||||
cmd: |-
|
||||
hey -z 1m -q 10 -c 2 -h2 -host app.example.com http://apisix-gateway.apisix/api/info
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
apisixroute/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
apisixroute/podinfo-podinfo-canary
|
||||
```
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Conditions:
|
||||
Message: Canary analysis completed successfully, promotion finished.
|
||||
Reason: Succeeded
|
||||
Status: True
|
||||
Type: Promoted
|
||||
Failed Checks: 1
|
||||
Iterations: 0
|
||||
Phase: Succeeded
|
||||
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Warning Synced 2m59s flagger podinfo-primary.test not ready: waiting for rollout to finish: observed deployment generation less than desired generation
|
||||
Warning Synced 2m50s flagger podinfo-primary.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available
|
||||
Normal Synced 2m40s (x3 over 2m59s) flagger all the metrics providers are available!
|
||||
Normal Synced 2m39s flagger Initialization done! podinfo.test
|
||||
Normal Synced 2m20s flagger New revision detected! Scaling up podinfo.test
|
||||
Warning Synced 2m (x2 over 2m10s) flagger canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available
|
||||
Normal Synced 110s flagger Starting canary analysis for podinfo.test
|
||||
Normal Synced 109s flagger Advance podinfo.test canary weight 10
|
||||
Warning Synced 100s flagger Halt advancement no values found for apisix metric request-success-rate probably podinfo.test is not receiving traffic: running query failed: no values found
|
||||
Normal Synced 90s flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 80s flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 69s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 59s flagger Advance podinfo.test canary weight 50
|
||||
Warning Synced 30s (x2 over 40s) flagger podinfo-primary.test not ready: waiting for rollout to finish: 1 old replicas are pending termination
|
||||
Normal Synced 9s (x3 over 50s) flagger (combined from similar events): Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo-2 Progressing 10 2022-11-23T05:00:54Z
|
||||
test podinfo Succeeded 0 2022-11-23T06:00:54Z
|
||||
```
|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 -host app.example.com http://apisix-gateway.apisix/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl -H \"host: app.example.com\" http://apisix-gateway.apisix/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n apisix logs deploy/flagger -f | jq .msg
|
||||
|
||||
"New revision detected! Scaling up podinfo.test"
|
||||
"canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 (readyThreshold 100%) updated replicas are available"
|
||||
"Starting canary analysis for podinfo.test"
|
||||
"Advance podinfo.test canary weight 10"
|
||||
"Halt podinfo.test advancement success rate 0.00% < 99%"
|
||||
"Halt podinfo.test advancement success rate 26.76% < 99%"
|
||||
"Halt podinfo.test advancement success rate 34.19% < 99%"
|
||||
"Halt podinfo.test advancement success rate 37.32% < 99%"
|
||||
"Halt podinfo.test advancement success rate 39.04% < 99%"
|
||||
"Halt podinfo.test advancement success rate 40.13% < 99%"
|
||||
"Halt podinfo.test advancement success rate 48.28% < 99%"
|
||||
"Halt podinfo.test advancement success rate 50.35% < 99%"
|
||||
"Halt podinfo.test advancement success rate 56.92% < 99%"
|
||||
"Halt podinfo.test advancement success rate 67.70% < 99%"
|
||||
"Rolling back podinfo.test failed checks threshold reached 10"
|
||||
"Canary failed! Scaling down podinfo.test"
|
||||
```
|
||||
|
||||
## Custom metrics
|
||||
|
||||
The canary analysis can be extended with Prometheus queries.
|
||||
|
||||
Create a metric template and apply it on the cluster:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: not-found-percentage
|
||||
namespace: test
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://flagger-prometheus.apisix:9090
|
||||
query: |
|
||||
sum(
|
||||
rate(
|
||||
apisix_http_status{
|
||||
route=~"{{ namespace }}_{{ route }}-{{ target }}-canary_.+",
|
||||
code!~"4.."
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
apisix_http_status{
|
||||
route=~"{{ namespace }}_{{ route }}-{{ target }}-canary_.+"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) * 100
|
||||
```
|
||||
|
||||
Edit the canary analysis and add the not found error rate check:
|
||||
|
||||
```yaml
|
||||
analysis:
|
||||
metrics:
|
||||
- name: "404s percentage"
|
||||
templateRef:
|
||||
name: not-found-percentage
|
||||
thresholdRange:
|
||||
max: 5
|
||||
interval: 1m
|
||||
```
|
||||
|
||||
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
|
||||
|
||||
|
||||
The above procedures can be extended with more [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
|
||||
@@ -1,431 +0,0 @@
|
||||
# App Mesh Canary Deployments
|
||||
|
||||
This guide shows you how to use App Mesh and Flagger to automate canary deployments.
|
||||
You'll need an EKS cluster (Kubernetes >= 1.16) configured with App Mesh,
|
||||
you can find the installation guide [here](https://docs.flagger.app/install/flagger-install-on-eks-appmesh).
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services,
|
||||
App Mesh virtual nodes and services).
|
||||
These objects expose the application on the mesh and drive the canary analysis and promotion.
|
||||
The only App Mesh object you need to create by yourself is the mesh resource.
|
||||
|
||||
Create a mesh called `global`:
|
||||
|
||||
```bash
|
||||
cat << EOF | kubectl apply -f -
|
||||
apiVersion: appmesh.k8s.aws/v1beta2
|
||||
kind: Mesh
|
||||
metadata:
|
||||
name: global
|
||||
spec:
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: enabled
|
||||
EOF
|
||||
```
|
||||
|
||||
Create a test namespace with App Mesh sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
cat << EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: test
|
||||
labels:
|
||||
appmesh.k8s.aws/sidecarInjectorWebhook: enabled
|
||||
EOF
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test \
|
||||
--set appmesh.enabled=true \
|
||||
--set "appmesh.backends[0]=podinfo" \
|
||||
--set "appmesh.backends[1]=podinfo-canary"
|
||||
```
|
||||
|
||||
Create a canary definition:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# App Mesh API reference
|
||||
provider: appmesh:v1beta2
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
  # to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# App Mesh ingress timeout (optional)
|
||||
timeout: 15s
|
||||
# App Mesh retry policy (optional)
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
retryOn: "gateway-error,client-error,stream-error"
|
||||
# App Mesh URI settings
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
# define the canary analysis timing and KPIs
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# App Mesh Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated Kubernetes objects
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
|
||||
# generated App Mesh objects
|
||||
virtualnode.appmesh.k8s.aws/podinfo-canary
|
||||
virtualnode.appmesh.k8s.aws/podinfo-primary
|
||||
virtualrouter.appmesh.k8s.aws/podinfo
|
||||
virtualrouter.appmesh.k8s.aws/podinfo-canary
|
||||
virtualservice.appmesh.k8s.aws/podinfo
|
||||
virtualservice.appmesh.k8s.aws/podinfo-canary
|
||||
```
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
|
||||
will be routed to the primary pods.
|
||||
During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
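For example, while an analysis is in progress you can curl the canary service from the load tester pod (which already lists `podinfo-canary` as an App Mesh backend); this is just an illustrative check:

```bash
# Hit the canary pods directly from inside the mesh
kubectl -n test exec deploy/flagger-loadtester -- \
  curl -s http://podinfo-canary.test:9898/healthz
```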
|
||||
|
||||
App Mesh blocks all egress traffic by default.
|
||||
If your application needs to call another service, you have to create an App Mesh virtual service for it
|
||||
and add the virtual service name to the backend list.
|
||||
|
||||
```yaml
|
||||
service:
|
||||
port: 9898
|
||||
backends:
|
||||
- backend1
|
||||
- arn:aws:appmesh:eu-west-1:12345678910:mesh/my-mesh/virtualService/backend2
|
||||
```
|
||||
|
||||
## Setup App Mesh Gateway (optional)
|
||||
|
||||
In order to expose the podinfo app outside the mesh you can use the App Mesh Gateway.
|
||||
|
||||
Deploy the App Mesh Gateway behind an AWS NLB:
|
||||
|
||||
```bash
|
||||
helm upgrade -i appmesh-gateway eks/appmesh-gateway \
|
||||
--namespace test
|
||||
```
|
||||
|
||||
Find the gateway public address:
|
||||
|
||||
```bash
|
||||
export URL="http://$(kubectl -n test get svc/appmesh-gateway -ojson | jq -r ".status.loadBalancer.ingress[].hostname")"
|
||||
echo $URL
|
||||
```
|
||||
|
||||
Wait for the NLB to become active:
|
||||
|
||||
```bash
|
||||
watch curl -sS $URL
|
||||
```
|
||||
|
||||
Create a gateway route that points to the podinfo virtual service:
|
||||
|
||||
```bash
|
||||
cat << EOF | kubectl apply -f -
|
||||
apiVersion: appmesh.k8s.aws/v1beta2
|
||||
kind: GatewayRoute
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
httpRoute:
|
||||
match:
|
||||
prefix: "/"
|
||||
action:
|
||||
target:
|
||||
virtualService:
|
||||
virtualServiceRef:
|
||||
name: podinfo
|
||||
EOF
|
||||
```
|
||||
|
||||
Open your browser and navigate to the ingress address to access the podinfo UI.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
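If you want to script the wait for a promotion instead of tailing the events, something like the following should work, assuming your Flagger version sets the `Promoted` status condition on the Canary resource:

```bash
# Block until the canary is promoted (or the timeout expires)
kubectl -n test wait canary/podinfo --for=condition=promoted --timeout=10m

# Inspect the final status
kubectl -n test get canary/podinfo
```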
|
||||
|
||||
During the analysis the canary’s progress can be monitored with Grafana.
|
||||
The App Mesh dashboard URL is
|
||||
[http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo](http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo).
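The localhost address assumes Grafana is reachable on your machine; if it is not, you can port-forward the Grafana service that ships with the Flagger install (the service name and namespace below are the defaults from the EKS/App Mesh install guide and may differ in your setup):

```bash
kubectl -n appmesh-system port-forward svc/flagger-grafana 3000:80
```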
|
||||
|
||||

|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT
|
||||
test podinfo Progressing 15
|
||||
prod frontend Succeeded 0
|
||||
prod backend Failed 0
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you should receive the following messages:
|
||||
|
||||

|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 http://podinfo-canary.test:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! progressing canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 0.5s
|
||||
Halt podinfo.test advancement request duration 1.45s > 0.5s
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you’ll receive a message if the progress deadline is exceeded,
|
||||
or if the analysis reached the maximum number of failed checks:
|
||||
|
||||

|
||||
|
||||
## A/B Testing
|
||||
|
||||
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
|
||||
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
|
||||
This is particularly useful for frontend applications that require session affinity.
|
||||
|
||||

|
||||
|
||||
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
|
||||
|
||||
```yaml
|
||||
analysis:
|
||||
interval: 1m
|
||||
threshold: 5
|
||||
iterations: 10
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "insider"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'X-Canary: insider' http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting users that have an `X-Canary: insider` header.
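You can verify the header match yourself from inside the mesh, for example from the load tester pod; while the A/B analysis is running, the request carrying the header should be served by the canary and the one without it by the primary:

```bash
# Served by the canary (header matches the A/B condition)
kubectl -n test exec deploy/flagger-loadtester -- \
  curl -s -H 'X-Canary: insider' http://podinfo.test:9898/version

# Served by the primary (no header)
kubectl -n test exec deploy/flagger-loadtester -- \
  curl -s http://podinfo.test:9898/version
```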
|
||||
|
||||
You can also use an HTTP cookie. To target all users with a `canary` cookie set to `insider`, the match condition should be:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(canary=insider)(;.*)?$"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=insider' http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B test:
|
||||
|
||||
```text
|
||||
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! progressing canary analysis for podinfo.test
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
The above procedure can be extended with
|
||||
[custom metrics](../usage/metrics.md) checks,
|
||||
[webhooks](../usage/webhooks.md),
|
||||
[manual promotion](../usage/webhooks.md#manual-gating) approval and
|
||||
[Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
@@ -1,347 +0,0 @@
|
||||
# Canaries with Helm charts and GitOps
|
||||
|
||||
This guide shows you how to package a web app into a Helm chart, trigger canary deployments on Helm upgrade and automate the chart release process with Weave Flux.
|
||||
|
||||
## Packaging
|
||||
|
||||
You'll be using the [podinfo](https://github.com/stefanprodan/k8s-podinfo) chart. This chart packages a web app made with Go, its configuration, a horizontal pod autoscaler \(HPA\) and the canary configuration file.
|
||||
|
||||
```text
|
||||
├── Chart.yaml
|
||||
├── README.md
|
||||
├── templates
|
||||
│ ├── NOTES.txt
|
||||
│ ├── _helpers.tpl
|
||||
│ ├── canary.yaml
|
||||
│ ├── configmap.yaml
|
||||
│ ├── deployment.yaml
|
||||
│ ├── hpa.yaml
|
||||
│ ├── service.yaml
|
||||
│ └── tests
|
||||
│ ├── test-config.yaml
|
||||
│ └── test-pod.yaml
|
||||
└── values.yaml
|
||||
```
|
||||
|
||||
You can find the chart source [here](https://github.com/stefanprodan/flagger/tree/master/charts/podinfo).
|
||||
|
||||
## Install
|
||||
|
||||
Create a test namespace with Istio sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/fluxcd/flagger/main
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
|
||||
```
|
||||
|
||||
Add Flagger Helm repository:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
```
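Optionally, you can inspect the chart's metadata and default values before installing it (`helm inspect` works with both Helm v2 and v3; in v3 it is an alias for `helm show`):

```bash
helm inspect chart flagger/podinfo
helm inspect values flagger/podinfo
```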
|
||||
|
||||
Install podinfo with the release name `frontend` \(replace `example.com` with your own domain\):
|
||||
|
||||
```bash
|
||||
helm upgrade -i frontend flagger/podinfo \
|
||||
--namespace test \
|
||||
--set nameOverride=frontend \
|
||||
--set backend=http://backend.test:9898/echo \
|
||||
--set canary.enabled=true \
|
||||
--set canary.istioIngress.enabled=true \
|
||||
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
|
||||
--set canary.istioIngress.host=frontend.istio.example.com
|
||||
```
|
||||
|
||||
Flagger takes a Kubernetes deployment and a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\). These objects expose the application on the mesh and drive the canary analysis and promotion.
|
||||
|
||||
```bash
|
||||
# generated by Helm
|
||||
configmap/frontend
|
||||
deployment.apps/frontend
|
||||
horizontalpodautoscaler.autoscaling/frontend
|
||||
canary.flagger.app/frontend
|
||||
|
||||
# generated by Flagger
|
||||
configmap/frontend-primary
|
||||
deployment.apps/frontend-primary
|
||||
horizontalpodautoscaler.autoscaling/frontend-primary
|
||||
service/frontend
|
||||
service/frontend-canary
|
||||
service/frontend-primary
|
||||
virtualservice.networking.istio.io/frontend
|
||||
```
|
||||
|
||||
When the `frontend-primary` deployment comes online, Flagger will route all traffic to the primary pods and scale the `frontend` deployment to zero.
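You can confirm the switch by looking at the replica counts; once the canary is initialized, only the primary deployment should have ready pods:

```bash
# The frontend deployment is scaled to zero after initialization,
# while frontend-primary serves all the traffic
kubectl -n test get deployment frontend frontend-primary
```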
|
||||
|
||||
Open your browser and navigate to the frontend URL:
|
||||
|
||||

|
||||
|
||||
Now let's install the `backend` release without exposing it outside the mesh:
|
||||
|
||||
```bash
|
||||
helm upgrade -i backend flagger/podinfo \
|
||||
--namespace test \
|
||||
--set nameOverride=backend \
|
||||
--set canary.enabled=true \
|
||||
--set canary.istioIngress.enabled=false
|
||||
```
|
||||
|
||||
Check if Flagger has successfully deployed the canaries:
|
||||
|
||||
```text
|
||||
kubectl -n test get canaries
|
||||
|
||||
NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
backend Initialized 0 2019-02-12T18:53:18Z
|
||||
frontend Initialized 0 2019-02-12T17:50:50Z
|
||||
```
|
||||
|
||||
Click on the ping button in the `frontend` UI to trigger an HTTP POST request that will reach the `backend` app:
|
||||
|
||||

|
||||
|
||||
We'll use the `/echo` endpoint \(same as the one the ping button calls\) to generate load on both apps during a canary deployment.
|
||||
|
||||
## Upgrade
|
||||
|
||||
First let's install a load testing service that will generate traffic during analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Install Flagger's helm test runner in the `kube-system` namespace using the `tiller` service account:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-helmtester flagger/loadtester \
|
||||
--namespace=kube-system \
|
||||
--set serviceAccountName=tiller
|
||||
```
|
||||
|
||||
Enable the load and helm tester and deploy a new `frontend` version:
|
||||
|
||||
```bash
|
||||
helm upgrade -i frontend flagger/podinfo \
|
||||
--namespace test \
|
||||
--reuse-values \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set canary.helmtest.enabled=true \
|
||||
--set image.tag=3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the canary analysis:
|
||||
|
||||
```text
|
||||
kubectl -n istio-system logs deployment/flagger -f | jq .msg
|
||||
|
||||
New revision detected! Scaling up frontend.test
|
||||
Halt advancement frontend.test waiting for rollout to finish: 0 of 2 updated replicas are available
|
||||
Starting canary analysis for frontend.test
|
||||
Pre-rollout check helm test passed
|
||||
Advance frontend.test canary weight 5
|
||||
Advance frontend.test canary weight 10
|
||||
Advance frontend.test canary weight 15
|
||||
Advance frontend.test canary weight 20
|
||||
Advance frontend.test canary weight 25
|
||||
Advance frontend.test canary weight 30
|
||||
Advance frontend.test canary weight 35
|
||||
Advance frontend.test canary weight 40
|
||||
Advance frontend.test canary weight 45
|
||||
Advance frontend.test canary weight 50
|
||||
Copying frontend.test template spec to frontend-primary.test
|
||||
Halt advancement frontend-primary.test waiting for rollout to finish: 1 old replicas are pending termination
|
||||
Promotion completed! Scaling down frontend.test
|
||||
```
|
||||
|
||||
You can monitor the canary deployment with Grafana. Open the Flagger dashboard, select `test` from the namespace dropdown, `frontend-primary` from the primary dropdown and `frontend` from the canary dropdown.
|
||||
|
||||

|
||||
|
||||
Now trigger a canary deployment for the `backend` app, but this time you'll change a value in the configmap:
|
||||
|
||||
```bash
|
||||
helm upgrade -i backend flagger/podinfo \
|
||||
--namespace test \
|
||||
--reuse-values \
|
||||
--set canary.loadtest.enabled=true \
|
||||
--set canary.helmtest.enabled=true \
|
||||
--set httpServer.timeout=25s
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xxx-yyy sh
|
||||
|
||||
watch curl http://backend-canary:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xxx-yyy sh
|
||||
|
||||
watch curl http://backend-canary:9898/delay/1
|
||||
```
|
||||
|
||||
Flagger detects the config map change and starts a canary analysis. Flagger will pause the advancement when the HTTP success rate drops under 99% or when the average request duration in the last minute is over 500ms:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary backend
|
||||
|
||||
Events:
|
||||
|
||||
ConfigMap backend has changed
|
||||
New revision detected! Scaling up backend.test
|
||||
Starting canary analysis for backend.test
|
||||
Advance backend.test canary weight 5
|
||||
Advance backend.test canary weight 10
|
||||
Advance backend.test canary weight 15
|
||||
Advance backend.test canary weight 20
|
||||
Advance backend.test canary weight 25
|
||||
Advance backend.test canary weight 30
|
||||
Advance backend.test canary weight 35
|
||||
Halt backend.test advancement success rate 62.50% < 99%
|
||||
Halt backend.test advancement success rate 88.24% < 99%
|
||||
Advance backend.test canary weight 40
|
||||
Advance backend.test canary weight 45
|
||||
Halt backend.test advancement request duration 2.415s > 500ms
|
||||
Halt backend.test advancement request duration 2.42s > 500ms
|
||||
Advance backend.test canary weight 50
|
||||
ConfigMap backend-primary synced
|
||||
Copying backend.test template spec to backend-primary.test
|
||||
Promotion completed! Scaling down backend.test
|
||||
```
|
||||
|
||||

|
||||
|
||||
If the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```bash
|
||||
kubectl -n test get canary
|
||||
|
||||
NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
backend Succeeded 0 2019-02-12T19:33:11Z
|
||||
frontend Failed 0 2019-02-12T19:47:20Z
|
||||
```
|
||||
|
||||
If you've enabled the Slack notifications, you'll receive an alert with the reason why the `backend` promotion failed.
|
||||
|
||||
## GitOps automation
|
||||
|
||||
Instead of using the Helm CLI from a CI tool to perform the install and upgrade, you could use a Git-based approach. GitOps is a way to do Continuous Delivery: it works by using Git as a source of truth for declarative infrastructure and workloads. In the [GitOps model](https://www.weave.works/technologies/gitops/), any change to production must be committed in source control prior to being applied on the cluster. This way rollback and audit logs are provided by Git.
|
||||
|
||||

|
||||
|
||||
In order to apply the GitOps pipeline model to Flagger canary deployments you'll need a Git repository with your workloads definitions in YAML format, a container registry where your CI system pushes immutable images and an operator that synchronizes the Git repo with the cluster state.
|
||||
|
||||
Create a git repository with the following content:
|
||||
|
||||
```text
|
||||
├── namespaces
|
||||
│ └── test.yaml
|
||||
└── releases
|
||||
└── test
|
||||
├── backend.yaml
|
||||
├── frontend.yaml
|
||||
├── loadtester.yaml
|
||||
└── helmtester.yaml
|
||||
```
|
||||
|
||||
Define the `frontend` release using Flux `HelmRelease` custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flux.weave.works/v1beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: frontend
|
||||
namespace: test
|
||||
annotations:
|
||||
fluxcd.io/automated: "true"
|
||||
filter.fluxcd.io/chart-image: semver:~3.1
|
||||
spec:
|
||||
releaseName: frontend
|
||||
chart:
|
||||
    git: https://github.com/weaveworks/flagger
|
||||
ref: master
|
||||
path: charts/podinfo
|
||||
values:
|
||||
image:
|
||||
repository: stefanprodan/podinfo
|
||||
tag: 3.1.0
|
||||
backend: http://backend-podinfo:9898/echo
|
||||
canary:
|
||||
enabled: true
|
||||
istioIngress:
|
||||
enabled: true
|
||||
gateway: public-gateway.istio-system.svc.cluster.local
|
||||
host: frontend.istio.example.com
|
||||
loadtest:
|
||||
enabled: true
|
||||
helmtest:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
In the `chart` section I've defined the release source by specifying the Git repository, branch and chart path. In the `values` section I've overwritten the defaults set in values.yaml.
|
||||
|
||||
With the `fluxcd.io` annotations I instruct Flux to automate this release. When an image tag in the semver range of `3.1.0 - 3.1.99` is pushed to Docker Hub, Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
|
||||
|
||||
Install [Flux](https://github.com/fluxcd/flux) and its [Helm Operator](https://github.com/fluxcd/helm-operator) by specifying your Git repo URL:
|
||||
|
||||
```bash
|
||||
helm repo add fluxcd https://charts.fluxcd.io
|
||||
|
||||
helm install --name flux \
|
||||
--set git.url=git@github.com:<USERNAME>/<REPOSITORY> \
|
||||
--namespace fluxcd \
|
||||
fluxcd/flux
|
||||
|
||||
helm upgrade -i helm-operator fluxcd/helm-operator \
|
||||
--namespace fluxcd \
|
||||
--set git.ssh.secretName=flux-git-deploy
|
||||
```
|
||||
|
||||
At startup Flux generates an SSH key and logs the public key. Find the SSH public key with:
|
||||
|
||||
```bash
|
||||
kubectl -n fluxcd logs deployment/flux | grep identity.pub | cut -d '"' -f2
|
||||
```
|
||||
|
||||
In order to sync your cluster state with Git you need to copy the public key and create a deploy key with write access on your GitHub repository.
|
||||
|
||||
Open GitHub, navigate to your fork, go to _Settings > Deploy keys_, click on _Add deploy key_, check _Allow write access_, paste the Flux public key and click _Add key_.
|
||||
|
||||
After a couple of seconds Flux will apply the Kubernetes resources from Git and Flagger will launch the `frontend` and `backend` apps.
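If you don't want to wait for Flux's polling interval, you can trigger a sync and list the managed workloads with the `fluxctl` CLI; this is a convenience sketch, and the `--k8s-fwd-ns` flag must point at the namespace where Flux was installed:

```bash
# Force a git sync instead of waiting for the next poll
fluxctl sync --k8s-fwd-ns fluxcd

# List the workloads managed by Flux in the test namespace
fluxctl list-workloads --k8s-fwd-ns fluxcd -n test
```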
|
||||
|
||||
A CI/CD pipeline for the `frontend` release could look like this:
|
||||
|
||||
* cut a release from the master branch of the podinfo code repo with the git tag `3.1.1`
|
||||
* CI builds the image and pushes the `podinfo:3.1.1` image to the container registry
|
||||
* Flux scans the registry and updates the Helm release `image.tag` to `3.1.1`
|
||||
* Flux commits and pushes the change to the cluster repo
|
||||
* Flux applies the updated Helm release on the cluster
|
||||
* Flux Helm Operator picks up the change and calls Tiller to upgrade the release
|
||||
* Flagger detects a revision change and scales up the `frontend` deployment
|
||||
* Flagger runs the helm test before routing traffic to the canary service
|
||||
* Flagger starts the load test and runs the canary analysis
|
||||
* Based on the analysis result the canary deployment is promoted to production or rolled back
|
||||
* Flagger sends a Slack or MS Teams notification with the canary result
|
||||
|
||||
If the canary fails, fix the bug, do another patch release \(e.g. `3.1.2`\) and the whole process will run again.
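Cutting that patch release is just a tag on the application repository; a minimal sketch, assuming the CI pipeline described above builds and pushes images for tags:

```bash
# In the podinfo code repository
git tag -a 3.1.2 -m "fix the canary regression"
git push origin 3.1.2
# CI builds and pushes podinfo:3.1.2, Flux bumps image.tag in the HelmRelease,
# and Flagger starts a new canary analysis
```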
|
||||
|
||||
A canary deployment can fail due to any of the following reasons:
|
||||
|
||||
* the container image can't be downloaded
|
||||
* the deployment replica set is stuck for more than ten minutes \(e.g. due to a container crash loop\)
|
||||
* the webhooks \(acceptance tests, helm tests, load tests, etc\) are returning a non 2xx response
|
||||
* the HTTP success rate \(non 5xx responses\) metric drops under the threshold
|
||||
* the HTTP average duration metric goes over the threshold
|
||||
* the Istio telemetry service is unable to collect traffic metrics
|
||||
* the metrics server \(Prometheus\) can't be reached
|
||||
|
||||
If you want to find out more about managing Helm releases with Flux here are two in-depth guides: [gitops-helm](https://github.com/stefanprodan/gitops-helm) and [gitops-istio](https://github.com/stefanprodan/gitops-istio).
|
||||
|
||||
@@ -76,7 +76,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -90,6 +90,8 @@ spec:
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
# supported values for retryOn - https://projectcontour.io/docs/main/config/api/#projectcontour.io/v1.RetryOn
|
||||
retryOn: "5xx"
|
||||
# define the canary analysis timing and KPIs
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
@@ -157,7 +159,7 @@ service/podinfo-primary
|
||||
httpproxy.projectcontour.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Expose the app outside the cluster
|
||||
|
||||
@@ -224,7 +226,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -281,7 +283,7 @@ Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
@@ -369,7 +371,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B test:
|
||||
|
||||
docs/gitbook/tutorials/gatewayapi-progressive-delivery.md (new file, 845 additions)
@@ -0,0 +1,845 @@
|
||||
|
||||
This guide shows you how to use [Gateway API](https://gateway-api.sigs.k8s.io/) and Flagger to automate canary deployments and A/B testing.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires an ingress controller or service mesh that implements the Gateway API **HTTPRoute** (`v1` or `v1beta1`).
|
||||
|
||||
We'll be using Istio for the sake of this tutorial, but you can use any other implementation.
|
||||
|
||||
Install the Gateway API CRDs:
|
||||
|
||||
```bash
|
||||
# Suggestion: change v1.4.0 to the latest Gateway API version
|
||||
kubectl apply --server-side -k "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.4.0"
|
||||
```
|
||||
|
||||
Install Istio:
|
||||
|
||||
```bash
|
||||
istioctl install --set profile=minimal -y
|
||||
|
||||
# Suggestion: change release-1.27 to the latest Istio version
|
||||
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.27/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
Install Flagger in the `flagger-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns flagger-system
|
||||
|
||||
helm repo add flagger https://flagger.app
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace flagger-system \
|
||||
--set prometheus.install=false \
|
||||
--set meshProvider=gatewayapi:v1 \
|
||||
--set metricsServer=http://prometheus.istio-system:9090
|
||||
```
|
||||
|
||||
Create a namespace for the `Gateway`:
|
||||
|
||||
```bash
|
||||
kubectl create ns istio-ingress
|
||||
```
|
||||
|
||||
Create a `Gateway` that configures load balancing, traffic ACL, etc:
|
||||
|
||||
```yaml
|
||||
apiVersion: gateway.networking.k8s.io/v1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: gateway
|
||||
namespace: istio-ingress
|
||||
spec:
|
||||
gatewayClassName: istio
|
||||
listeners:
|
||||
- name: default
|
||||
hostname: "*.example.com"
|
||||
port: 80
|
||||
protocol: HTTP
|
||||
allowedRoutes:
|
||||
namespaces:
|
||||
from: All
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\),
|
||||
then creates a series of objects \(Kubernetes deployments, ClusterIP services, HTTPRoutes for the Gateway\).
|
||||
These objects expose the application inside the mesh and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
```
|
||||
|
||||
Create metric templates targeting the Prometheus server in the `flagger-system` namespace.
|
||||
The PromQL queries below are meant for `Envoy`,
|
||||
but you can [change them to match your ingress/mesh provider](https://docs.flagger.app/faq#metrics).
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: latency
|
||||
namespace: flagger-system
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://prometheus.istio-system:9090
|
||||
query: |
|
||||
histogram_quantile(0.99,
|
||||
sum(
|
||||
rate(
|
||||
istio_request_duration_milliseconds_bucket{
|
||||
reporter="source",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}",
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) by (le)
|
||||
)/1000
|
||||
---
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: error-rate
|
||||
namespace: flagger-system
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://prometheus.istio-system:9090
|
||||
query: |
|
||||
100 - sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="source",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}",
|
||||
response_code!~"5.*"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
istio_requests_total{
|
||||
reporter="source",
|
||||
destination_workload_namespace=~"{{ namespace }}",
|
||||
destination_workload=~"{{ target }}",
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
* 100
|
||||
```
|
||||
|
||||
Save the above resource as metric-templates.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f metric-templates.yaml
|
||||
```
|
||||
|
||||
Create a Canary custom resource \(replace "www.example.com" with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
  # to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Gateway API HTTPRoute host names
|
||||
hosts:
|
||||
- www.example.com
|
||||
# Reference to the Gateway that the generated HTTPRoute would attach to.
|
||||
gatewayRefs:
|
||||
- name: gateway
|
||||
namespace: istio-ingress
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
metrics:
|
||||
- name: error-rate
|
||||
# max error rate (5xx responses)
|
||||
# percentage (0-100)
|
||||
templateRef:
|
||||
name: error-rate
|
||||
namespace: flagger-system
|
||||
thresholdRange:
|
||||
max: 1
|
||||
interval: 1m
|
||||
- name: latency
|
||||
templateRef:
|
||||
name: latency
|
||||
namespace: flagger-system
|
||||
# seconds
|
||||
thresholdRange:
|
||||
max: 0.5
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: smoke-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 15s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'anon' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
httproutes.gateway.networking.k8s.io/podinfo
|
||||
```
|
||||
|
||||
## Expose the app outside the cluster
|
||||
|
||||
Find the external address of Istio's load balancer:
|
||||
|
||||
```bash
|
||||
export ADDRESS="$(kubectl -n istio-ingress get svc/gateway-istio -ojson \
|
||||
| jq -r ".status.loadBalancer.ingress[].hostname")"
|
||||
echo $ADDRESS
|
||||
```
|
||||
|
||||
Configure your DNS server with a CNAME record \(AWS\) or A record \(GKE/AKS/DOKS\) and
|
||||
point a domain e.g. `www.example.com` to the LB address.
|
||||
|
||||
Now you can access the podinfo UI using your domain address.
|
||||
|
||||
Note that you should be using HTTPS when exposing production workloads on the internet.
|
||||
If you're using a local cluster you can port forward to the Envoy LoadBalancer service:
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n istio-ingress svc/gateway-istio 8080:80
|
||||
```
|
||||
|
||||
Now you can access podinfo via `curl -H "Host: www.example.com" localhost:8080`.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
With the application bootstrapped, Flagger will continuously monitor the deployment for changes.
|
||||
When a new revision is detected, Flagger will start a canary analysis and gradually shift traffic to the new version.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 25
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 35
|
||||
Normal Synced 55s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 45s flagger Advance podinfo.test canary weight 45
|
||||
Normal Synced 35s flagger Advance podinfo.test canary weight 50
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis,
|
||||
Flagger will restart the analysis.
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
|
||||
* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
|
||||
* ConfigMaps mounted as volumes or mapped to environment variables
|
||||
* Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
You can monitor how Flagger progressively changes the weights of
|
||||
the HTTPRoute object that is attached to the Gateway with:
|
||||
|
||||
```bash
|
||||
watch kubectl get httproute -n test podinfo -o=jsonpath='{.spec.rules}'
|
||||
```
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2025-10-16T14:05:07Z
|
||||
prod frontend Succeeded 0 2025-10-15T16:15:07Z
|
||||
prod backend Failed 0 2025-10-14T17:05:07Z
|
||||
```
|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold,
|
||||
the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger Starting canary deployment for podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement error rate 69.17% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 61.39% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 55.06% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 47.00% > 1%
|
||||
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement error rate 38.08% > 1%
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
## A/B Testing
|
||||
|
||||
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
|
||||
In an A/B testing scenario, you'll be using HTTP headers and cookies to target a certain segment of your users.
|
||||
|
||||

|
||||
|
||||
Create a canary custom resource \(replace "www.example.com" with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Gateway API HTTPRoute host names
|
||||
hosts:
|
||||
- www.example.com
|
||||
# Reference to the Gateway that the generated HTTPRoute would attach to.
|
||||
gatewayRefs:
|
||||
- name: gateway
|
||||
namespace: istio-ingress
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# max number of failed iterations before rollback
|
||||
threshold: 2
|
||||
# canary match condition
|
||||
match:
|
||||
- headers:
|
||||
user-agent:
|
||||
regex: ".*Firefox.*"
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(type=insider)(;.*)?$"
|
||||
metrics:
|
||||
- name: error-rate
|
||||
# max error rate (5xx responses)
|
||||
# percentage (0-100)
|
||||
templateRef:
|
||||
name: error-rate
|
||||
namespace: flagger-system
|
||||
thresholdRange:
|
||||
max: 1
|
||||
interval: 1m
|
||||
- name: latency
|
||||
templateRef:
|
||||
name: latency
|
||||
namespace: flagger-system
|
||||
# seconds
|
||||
thresholdRange:
|
||||
max: 0.5
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com -H 'Cookie: type=insider' http://gateway-istio.istio-ingress/"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting those users that
|
||||
have an insider cookie or are using Firefox as a browser.
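You can exercise both match conditions through the port-forward set up earlier; the Firefox user agent string below is illustrative:

```bash
# Matches the user-agent condition
curl -s -H 'Host: www.example.com' \
  -A 'Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0' \
  http://localhost:8080/version

# Matches the cookie condition
curl -s -H 'Host: www.example.com' -H 'Cookie: type=insider' \
  http://localhost:8080/version
```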
|
||||
|
||||
Save the above resource as podinfo-ab-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-ab-canary.yaml
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
|
||||
Normal Synced 2m flagger Advance podinfo.test canary iteration 4/10
|
||||
Normal Synced 2m flagger Advance podinfo.test canary iteration 5/10
|
||||
Normal Synced 1m flagger Advance podinfo.test canary iteration 6/10
|
||||
Normal Synced 1m flagger Advance podinfo.test canary iteration 7/10
|
||||
Normal Synced 55s flagger Advance podinfo.test canary iteration 8/10
|
||||
Normal Synced 45s flagger Advance podinfo.test canary iteration 9/10
|
||||
Normal Synced 35s flagger Advance podinfo.test canary iteration 10/10
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
## Session Affinity
|
||||
|
||||
While Flagger can perform weighted routing and A/B testing individually,
|
||||
with Gateway API it can combine the two, resulting in a canary release with session affinity.
|
||||
For more information you can read the [deployment strategies docs](../usage/deployment-strategies.md#canary-release-with-session-affinity).
|
||||
|
||||
> **Note:** Session Affinity requires a Gateway API implementation that supports
|
||||
> the [`ResponseHeaderModifier`](https://gateway-api.sigs.k8s.io/guides/http-header-modifier/) API.
|
||||
|
||||
Create a canary custom resource \(replace www.example.com with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
  # to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Gateway API HTTPRoute host names
|
||||
hosts:
|
||||
- www.example.com
|
||||
# Reference to the Gateway that the generated HTTPRoute would attach to.
|
||||
gatewayRefs:
|
||||
- name: gateway
|
||||
namespace: istio-ingress
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
# session affinity config
|
||||
sessionAffinity:
|
||||
# name of the cookie used
|
||||
cookieName: flagger-cookie
|
||||
# max age of the cookie (in seconds)
|
||||
# optional; defaults to 86400
|
||||
maxAge: 21600
|
||||
metrics:
|
||||
- name: error-rate
|
||||
# max error rate (5xx responses)
|
||||
# percentage (0-100)
|
||||
templateRef:
|
||||
name: error-rate
|
||||
namespace: flagger-system
|
||||
thresholdRange:
|
||||
max: 1
|
||||
interval: 1m
|
||||
- name: latency
|
||||
templateRef:
|
||||
name: latency
|
||||
namespace: flagger-system
|
||||
# seconds
|
||||
thresholdRange:
|
||||
max: 0.5
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary-session-affinity.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary-session-affinity.yaml
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
You can load `www.example.com` in your browser and refresh it until you see the requests being served by `podinfo:6.0.1`.
|
||||
All subsequent requests after that will be served by `podinfo:6.0.1` and not `podinfo:6.0.0` because of the session affinity
|
||||
configured by Flagger in the HTTPRoute object.
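You can observe the same behaviour with curl; a response that was routed to the canary should carry a `Set-Cookie` header with the `flagger-cookie` name configured above (assuming the port-forward from earlier):

```bash
curl -s -i -H 'Host: www.example.com' http://localhost:8080/ | grep -i set-cookie
```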
|
||||
|
||||
To configure stickiness for the Primary deployment to ensure fair weighted traffic routing, please
|
||||
check out the [deployment strategies docs](../usage/deployment-strategies.md#canary-release-with-session-affinity).
|
||||
|
||||
## Traffic mirroring
|
||||
|
||||

|
||||
|
||||
For applications that perform read operations, Flagger can be configured to do B/G tests with traffic mirroring.
|
||||
|
||||
> **Note:** Traffic mirroring requires a Gateway API implementation that supports
|
||||
> the [`RequestMirror`](https://gateway-api.sigs.k8s.io/guides/http-request-mirroring/) filter.
|
||||
|
||||
You can enable mirroring by replacing `stepWeight` with `iterations` and by setting `analysis.mirror` to `true`:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Gateway API HTTPRoute host names
|
||||
hosts:
|
||||
- www.example.com
|
||||
# Reference to the Gateway that the generated HTTPRoute would attach to.
|
||||
gatewayRefs:
|
||||
- name: gateway
|
||||
namespace: istio-ingress
|
||||
analysis:
|
||||
# schedule interval
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# enable traffic shadowing
|
||||
mirror: true
|
||||
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 1m
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress/"
|
||||
```
|
||||
|
||||
Gateway API traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
|
||||
The response from the primary is sent back to the user and the response from the canary is discarded.
|
||||
|
||||
Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are within the threshold values.
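To confirm that shadowing is in place during the analysis, you can inspect the HTTPRoute generated by Flagger and look for the request mirror filter it adds (illustrative check):

```bash
kubectl -n test get httproute podinfo -o yaml | grep -i -B2 -A6 requestmirror
```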
|
||||
|
||||
The above procedures can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
|
||||
## Customising the HTTPRoute
|
||||
|
||||
Besides the `hosts` and `gatewayRefs` fields, you can customize the generated HTTPRoute with various options
|
||||
exposed under the `spec.service` field of the Canary.
|
||||
|
||||
### Header Manipulation
|
||||
|
||||
You can configure request and response header manipulation using the `spec.service.headers` field of the Canary.
|
||||
|
||||
> **Note:** Header manipulation requires a Gateway API implementation that supports
|
||||
> the [`RequestHeaderModifier`](https://gateway-api.sigs.k8s.io/guides/http-header-modifier/) and [`ResponseHeaderModifier`](https://gateway-api.sigs.k8s.io/guides/http-header-modifier/) filters.
|
||||
|
||||
Example configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
service:
|
||||
headers:
|
||||
request:
|
||||
add:
|
||||
x-custom-header: "custom-value"
|
||||
set:
|
||||
x-api-version: "v1"
|
||||
remove:
|
||||
- x-debug-header
|
||||
response:
|
||||
add:
|
||||
x-frame-options: "DENY"
|
||||
x-content-type-options: "nosniff"
|
||||
set:
|
||||
cache-control: "no-cache"
|
||||
remove:
|
||||
- x-powered-by
|
||||
```
|
||||
|
||||
### URL Rewriting
|
||||
|
||||
You can configure URL rewriting using the `spec.service.rewrite` field of the Canary to modify the path or hostname of requests.
|
||||
|
||||
> **Note:** URL rewriting requires a Gateway API implementation that supports
|
||||
> the [`URLRewrite`](https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/?h=urlrewrite#rewrites) filter.
|
||||
|
||||
Example configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
service:
|
||||
rewrite:
|
||||
# Rewrite the URI path
|
||||
uri: "/v2/api"
|
||||
# Optionally specify the rewrite type: "ReplaceFullPath" or "ReplacePrefixMatch"
|
||||
# Defaults to "ReplaceFullPath" if not specified
|
||||
type: "ReplaceFullPath"
|
||||
# Rewrite the hostname/authority header
|
||||
authority: "api.example.com"
|
||||
```

The `type` field determines how the URI rewriting is performed:

- **ReplaceFullPath**: Replaces the entire request path with the specified `uri` value
- **ReplacePrefixMatch**: Replaces only the prefix portion of the path that was matched

Example with prefix replacement:

```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
service:
|
||||
rewrite:
|
||||
uri: "/api/v2"
|
||||
type: "ReplacePrefixMatch"
|
||||
```

When using `ReplacePrefixMatch`, if a request comes to `/old/path`, and the HTTPRoute matches the prefix `/old`,
the request will be rewritten to `/api/v2/path`.
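
For illustration, with the configuration above the generated HTTPRoute rule would carry a `URLRewrite` filter roughly like the following. This is a sketch based on the Gateway API filter schema, not output copied from Flagger; the `/old` prefix match is an assumption carried over from the example above.

```yaml
# Hypothetical excerpt of the generated HTTPRoute rule (illustrative only)
rules:
  - matches:
      - path:
          type: PathPrefix
          value: /old
    filters:
      - type: URLRewrite
        urlRewrite:
          path:
            type: ReplacePrefixMatch
            replacePrefixMatch: /api/v2
```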

### CORS Policy

The cross-origin resource sharing policy can be configured via the `spec.service.corsPolicy` field of the Canary.

> **Note:** Cross-origin resource sharing requires a Gateway API implementation that supports
> the [`CORS`](https://gateway-api.sigs.k8s.io/geps/gep-1767/) filter.

Example configuration:

```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
service:
|
||||
corsPolicy:
|
||||
allowOrigin:
|
||||
- https://foo.example
|
||||
- http://foo.example
|
||||
allowMethods:
|
||||
- GET
|
||||
- PUT
|
||||
- POST
|
||||
- DELETE
|
||||
- PATCH
|
||||
- OPTIONS
|
||||
allowCredentials: true
|
||||
allowHeaders:
|
||||
- Keep-Alive
|
||||
- User-Agent
|
||||
- X-Requested-With
|
||||
- If-Modified-Since
|
||||
- Cache-Control
|
||||
- Content-Type
|
||||
- Authorization
|
||||
maxAge: 24h
|
||||
```
|
||||
@@ -110,7 +110,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -160,6 +160,8 @@ spec:
|
||||
cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy.gloo-system"
|
||||
```
|
||||
|
||||
*Note: when using upstreamRef the following fields are copied over from the original upstream: `Labels, SslConfig, CircuitBreakers, ConnectionConfig, UseHttp2, InitialStreamWindowSize`*
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
@@ -207,7 +209,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -262,7 +264,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
@@ -363,7 +365,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
@@ -425,7 +427,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.4
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B test:
|
||||
|
||||
@@ -15,7 +15,7 @@ Install Istio with telemetry support and Prometheus:
|
||||
```bash
|
||||
istioctl manifest install --set profile=default
|
||||
|
||||
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.8/samples/addons/prometheus.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.18/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
Install Flagger in the `istio-system` namespace:
|
||||
@@ -84,7 +84,7 @@ spec:
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -92,7 +92,7 @@ spec:
|
||||
port: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
@@ -173,13 +173,13 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/abtest
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Failed Checks: 0
|
||||
|
||||
@@ -13,7 +13,8 @@ Install Istio with telemetry support and Prometheus:
|
||||
```bash
|
||||
istioctl manifest install --set profile=default
|
||||
|
||||
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.8/samples/addons/prometheus.yaml
|
||||
# Suggestion: Please change release-1.8 in below command, to your real istio version.
|
||||
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.18/samples/addons/prometheus.yaml
|
||||
```
|
||||
|
||||
Install Flagger in the `istio-system` namespace:
|
||||
@@ -84,7 +85,7 @@ spec:
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -94,7 +95,7 @@ spec:
|
||||
targetPort: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- istio-system/public-gateway
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
@@ -185,7 +186,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -245,7 +246,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
@@ -291,6 +292,118 @@ Events:
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
## Session Affinity
|
||||
|
||||
While Flagger can perform weighted routing and A/B testing individually, with Istio it can combine the two, leading to a Canary
release with session affinity. For more information you can read the [deployment strategies docs](../usage/deployment-strategies.md#canary-release-with-session-affinity).
|
||||
|
||||
Create a canary custom resource \(replace app.example.com with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- istio-system/public-gateway
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
# Istio retry policy (optional)
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 1s
|
||||
retryOn: "gateway-error,connect-failure,refused-stream"
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
# session affinity config
|
||||
sessionAffinity:
|
||||
# name of the cookie used
|
||||
cookieName: flagger-cookie
|
||||
# max age of the cookie (in seconds)
|
||||
# optional; defaults to 86400
|
||||
maxAge: 21600
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary-session-affinity.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary-session-affinity.yaml
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
You can load `app.example.com` in your browser and refresh it until you see the requests being served by `podinfo:6.0.1`.
All subsequent requests will be served by `podinfo:6.0.1` rather than `podinfo:6.0.0`, because of the session affinity
configured by Flagger with Istio.
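
To verify the affinity cookie, you can inspect the response headers returned through the gateway. The command below is a sketch: the gateway address placeholder and the exact cookie attributes are assumptions, but the cookie name matches the `cookieName` configured above.

```bash
# Check for the affinity cookie set on canary responses (gateway address is a placeholder)
curl -sI -H "Host: app.example.com" http://<istio-ingressgateway-address>/ | grep -i set-cookie
# Illustrative output: set-cookie: flagger-cookie=...; Max-Age=21600
```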
|
||||
|
||||
## Traffic mirroring
|
||||
|
||||

|
||||
@@ -367,3 +480,61 @@ With the above configuration, Flagger will run a canary release with the followi
|
||||
|
||||
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
|
||||
|
||||
## Canary Deployments for TCP Services
|
||||
|
||||
Performing a Canary deployment on a TCP (non HTTP) service is nearly identical to an HTTP Canary. Besides updating your `Gateway` document to support the `TCP` routing, the only difference is you have to set the `appProtocol` field to `TCP` inside of the `service` section of your `Canary` document.
|
||||
|
||||
#### Example:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: public-gateway
|
||||
namespace: istio-system
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway
|
||||
servers:
|
||||
- port:
|
||||
number: 7070
|
||||
name: tcp-service
|
||||
protocol: TCP # <== set the protocol to tcp here
|
||||
hosts:
|
||||
- "*"
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
# omitted for brevity
|
||||
spec:
|
||||
service:
|
||||
port: 7070
|
||||
appProtocol: TCP # <== set the appProtocol here
|
||||
targetPort: 7070
|
||||
portName: "tcp-service-port"
|
||||
```
|
||||
|
||||
If the `appProtocol` equals `TCP` then Flagger will treat this as a Canary deployment for a `TCP` service. When it creates the `VirtualService` document it will add a `TCP` section to route requests between the `primary` and `canary` services. See Istio documentation for more information on this [spec](https://istio.io/latest/docs/reference/config/networking/virtual-service/#TCPRoute).
|
||||
|
||||
The resulting `VirtualService` will include a `tcp` section similar to what is shown below:
|
||||
```yaml
|
||||
tcp:
|
||||
- route:
|
||||
- destination:
|
||||
host: tcp-service-primary
|
||||
port:
|
||||
number: 7070
|
||||
weight: 100
|
||||
- destination:
|
||||
host: tcp-service-canary
|
||||
port:
|
||||
number: 7070
|
||||
weight: 0
|
||||
```
|
||||
|
||||
Once the Canary analysis begins, Flagger will be able to adjust the weights inside of this `tcp` section to advance the Canary deployment until it either runs into an error (and is halted) or it successfully reaches the end of the analysis and is Promoted.
|
||||
|
||||
It is also important to note that if you set `appProtocol` to anything other than `TCP`, for example `HTTP`, Flagger will perform the Canary and treat it as an `HTTP` service. The same holds true if you do not set `appProtocol` at all. Flagger will __ONLY__ treat a Canary as a `TCP` service if `appProtocol` equals `TCP`.
|
||||
docs/gitbook/tutorials/keda-scaledobject.md (new file, 243 lines)
@@ -0,0 +1,243 @@
|
||||
# Canary analysis with KEDA ScaledObjects
|
||||
|
||||
This guide shows you how to use Flagger with KEDA ScaledObjects to autoscale workloads during a Canary analysis run.
|
||||
We will be using a Blue/Green deployment strategy with the Kubernetes provider for the sake of this tutorial, but
|
||||
you can use any deployment strategy combined with any supported provider.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer. For this tutorial, we'll need KEDA **2.7.1** or newer.
|
||||
|
||||
Install KEDA:
|
||||
|
||||
```bash
|
||||
helm repo add kedacore https://kedacore.github.io/charts
|
||||
kubectl create namespace keda
|
||||
helm install keda kedacore/keda --namespace keda
|
||||
```
|
||||
|
||||
Install Flagger:
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
kubectl create namespace flagger
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace flagger \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=kubernetes
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and a KEDA ScaledObject targeting the deployment. It then creates a series of objects
|
||||
(Kubernetes deployments, ClusterIP services and another KEDA ScaledObject targeting the created Deployment).
|
||||
These objects expose the application inside the mesh and drive the Canary analysis and Blue/Green promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Create a deployment named `podinfo`:
|
||||
|
||||
```bash
|
||||
kubectl apply -n test -f https://raw.githubusercontent.com/fluxcd/flagger/main/kustomize/podinfo/deployment.yaml
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
```
|
||||
|
||||
Create a ScaledObject which targets the `podinfo` deployment and uses Prometheus as a trigger:
|
||||
```yaml
|
||||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
metadata:
|
||||
name: podinfo-so
|
||||
namespace: test
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
name: podinfo
|
||||
pollingInterval: 10
|
||||
cooldownPeriod: 20
|
||||
minReplicaCount: 1
|
||||
maxReplicaCount: 3
|
||||
triggers:
|
||||
- type: prometheus
|
||||
metadata:
|
||||
name: prom-trigger
|
||||
serverAddress: http://flagger-prometheus.flagger:9090
|
||||
metricName: http_requests_total
|
||||
query: sum(rate(http_requests_total{ app="podinfo" }[30s]))
|
||||
threshold: '5'
|
||||
```
|
||||
|
||||
Create a canary custom resource for the `podinfo` deployment:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: kubernetes
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# Scaler reference
|
||||
autoscalerRef:
|
||||
apiVersion: keda.sh/v1alpha1
|
||||
kind: ScaledObject
|
||||
# ScaledObject targeting the canary deployment
|
||||
name: podinfo-so
|
||||
# Mapping between trigger names and the related query to use for the generated
|
||||
# ScaledObject targeting the primary deployment. (Optional)
|
||||
primaryScalerQueries:
|
||||
prom-trigger: sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))
|
||||
# Overriding replica scaling configuration for the generated ScaledObject
|
||||
# targeting the primary deployment. (Optional)
|
||||
primaryScalerReplicas:
|
||||
minReplicas: 2
|
||||
maxReplicas: 5
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
port: 80
|
||||
targetPort: 9898
|
||||
name: podinfo-svc
|
||||
portDiscovery: true
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 15s
|
||||
# max number of failed checks before rollback
|
||||
threshold: 5
|
||||
# number of checks to run before promotion
|
||||
iterations: 5
|
||||
# Prometheus checks based on
|
||||
# http_request_duration_seconds histogram
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
interval: 1m
|
||||
thresholdRange:
|
||||
min: 99
|
||||
- name: request-duration
|
||||
interval: 30s
|
||||
thresholdRange:
|
||||
max: 500
|
||||
# load testing hooks
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 2m -q 20 -c 2 http://podinfo-svc-canary.test/"
|
||||
```
|
||||
|
||||
Save the above resource as `podinfo-canary.yaml` and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
scaledobject.keda.sh/podinfo-so
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
scaledobject.keda.sh/podinfo-so-primary
|
||||
```
|
||||
|
||||
We refer to our ScaledObject for the canary deployment using `.spec.autoscalerRef`. Flagger will use this to generate a ScaledObject which will scale the primary deployment.
By default, Flagger will try to guess the query to use for the primary ScaledObject, by replacing all mentions of `.spec.targetRef.Name` and `{.spec.targetRef.Name}-canary`
with `{.spec.targetRef.Name}-primary`, for all triggers.
For example, if your ScaledObject has a trigger query defined as `sum(rate(http_requests_total{ app="podinfo" }[30s]))` or `sum(rate(http_requests_total{ app="podinfo-canary" }[30s]))`, then the primary ScaledObject will have the same trigger with a query of `sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))`.
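
To make the end result concrete, the generated primary ScaledObject would carry a trigger along these lines. This is a sketch assembled from the ScaledObject and Canary shown earlier, not output captured from a cluster.

```yaml
# Hypothetical excerpt of the generated ScaledObject podinfo-so-primary
spec:
  scaleTargetRef:
    name: podinfo-primary
  triggers:
    - type: prometheus
      metadata:
        name: prom-trigger
        serverAddress: http://flagger-prometheus.flagger:9090
        metricName: http_requests_total
        query: sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))
        threshold: '5'
```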
|
||||
|
||||
If the generated query does not meet your requirements, you can specify the query for autoscaling the primary deployment explicitly using
`.spec.autoscalerRef.primaryScalerQueries`, which lets you define a query for each trigger. Please note that your ScaledObject's `.spec.triggers[@].name` must
not be blank, as Flagger needs it to identify each trigger uniquely.
|
||||
|
||||
If you want the primary deployment's ScaledObject to use a different replica scaling configuration than the canary's, you can use
`.spec.autoscalerRef.primaryScalerReplicas` to override these values for the generated primary ScaledObject.
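
With the `primaryScalerReplicas` override from the Canary above, the replica bounds on the generated primary ScaledObject would end up roughly as follows (a sketch, showing only the relevant fields):

```yaml
# Hypothetical excerpt: replica bounds on the generated primary ScaledObject
spec:
  minReplicaCount: 2  # from primaryScalerReplicas.minReplicas
  maxReplicaCount: 5  # from primaryScalerReplicas.maxReplicas
```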
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. To keep the podinfo deployment
at 0 replicas and pause autoscaling, Flagger will add an annotation to your ScaledObject: `autoscaling.keda.sh/paused-replicas: 0`.
During the canary analysis, the annotation is removed to enable autoscaling for the podinfo deployment.
The `podinfo-canary.test` address can be used to target the canary pods directly.
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The Blue/Green deployment will run for five iterations while validating the HTTP metrics and rollout hooks every 15 seconds.
|
||||
|
||||
|
||||
## Automated Blue/Green promotion
|
||||
|
||||
Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Events:
|
||||
|
||||
New revision detected podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 100 2019-06-16T14:05:07Z
|
||||
```
|
||||
|
||||
You can monitor the scaling of the deployments with:
|
||||
```bash
|
||||
watch kubectl -n test get deploy podinfo
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
flagger-loadtester 1/1 1 1 4m21s
|
||||
podinfo 3/3 3 3 4m28s
|
||||
podinfo-primary 3/3 3 3 3m14s
|
||||
```
|
||||
|
||||
You can monitor how Flagger edits the annotations of your ScaledObject with:
|
||||
```bash
|
||||
watch "kubectl get -n test scaledobjects podinfo-so -o=jsonpath='{.metadata.annotations}'"
|
||||
```
|
||||
docs/gitbook/tutorials/knative-progressive-delivery.md (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
# Knative Canary Deployments
|
||||
|
||||
This guide shows you how to use [Knative](https://knative.dev/) and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and a Knative Serving installation that supports
|
||||
the resources with `serving.knative.dev/v1` as their API version.
|
||||
|
||||
Install Knative v1.17.0:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.17.0/serving-crds.yaml
|
||||
kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.17.0/serving-core.yaml
|
||||
kubectl apply -f https://github.com/knative/net-kourier/releases/download/knative-v1.17.0/kourier.yaml
|
||||
kubectl patch configmap/config-network \
|
||||
--namespace knative-serving \
|
||||
--type merge \
|
||||
--patch '{"data":{"ingress-class":"kourier.ingress.networking.knative.dev"}}'
|
||||
```
|
||||
|
||||
|
||||
Install Flagger in the `flagger-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/fluxcd/flagger//kustomize/knative
|
||||
```
|
||||
|
||||
Create a namespace for your Knative Service:
|
||||
|
||||
```bash
|
||||
kubectl create namespace test
|
||||
```
|
||||
|
||||
Create a Knative Service that deploys podinfo:
|
||||
|
||||
```yaml
|
||||
apiVersion: serving.knative.dev/v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- image: ghcr.io/stefanprodan/podinfo:6.0.0
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
protocol: TCP
|
||||
command:
|
||||
- ./podinfo
|
||||
- --port=9898
|
||||
- --port-metrics=9797
|
||||
- --grpc-port=9999
|
||||
- --grpc-service-name=podinfo
|
||||
- --level=info
|
||||
- --random-delay=false
|
||||
- --random-error=false
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
```
|
||||
|
||||
Create a Canary custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
provider: knative
|
||||
# knative service ref
|
||||
targetRef:
|
||||
apiVersion: serving.knative.dev/v1
|
||||
kind: Service
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 15s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 15
|
||||
# max traffic percentage routed to canary
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 10
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# min success rate (non-5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 1m
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 5 -c 2 http://podinfo.test"
|
||||
logCmdOutput: "true"
|
||||
```
|
||||
|
||||
> Note: For a Canary resource with `.spec.provider` set to `knative`, the resource is only valid if the
`.spec.targetRef.kind` is `Service` and `.spec.targetRef.apiVersion` is `serving.knative.dev/v1`.
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
|
||||
|
||||
After a couple of seconds Flagger will make the following changes to the Knative Service `podinfo`:

* Add an annotation to the object with the name `flagger.app/primary-revision`.
* Modify the `.spec.traffic` section of the object such that it can manipulate the traffic spread between
  the primary and canary Knative Revisions, as sketched below.
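
For illustration, mid-analysis the `.spec.traffic` section of the Knative Service would look roughly like this. The revision name and percentages are assumptions for the sketch; Flagger adjusts the percentages as the analysis advances.

```yaml
# Hypothetical traffic split on the Knative Service during the analysis
spec:
  traffic:
    - revisionName: podinfo-00001  # primary revision
      percent: 90
    - latestRevision: true         # canary revision
      percent: 10
```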
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test patch services.serving podinfo --type=json \
|
||||
-p '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "ghcr.io/stefanprodan/podinfo:6.0.1"}]'
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 25
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 35
|
||||
Normal Synced 55s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 45s flagger Advance podinfo.test canary weight 45
|
||||
Normal Synced 35s flagger Advance podinfo.test canary weight 50
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
A canary deployment is triggered every time a new Knative Revision is created.
|
||||
|
||||
**Note** that if you apply new changes to the Knative Service during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor how Flagger progressively changes the Knative Service object to spread traffic between Knative Revisions:
|
||||
|
||||
```bash
|
||||
watch kubectl -n test get services.serving podinfo -o=jsonpath='{.spec.traffic}'
|
||||
```
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2025-03-16T14:05:07Z
|
||||
prod frontend Succeeded 0 2025-03-16T16:15:07Z
|
||||
prod backend Failed 0 2025-03-16T17:05:07Z
|
||||
```
|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test patch services.serving podinfo --type=json \
|
||||
-p '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "ghcr.io/stefanprodan/podinfo:6.0.2"}]'
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch curl http://podinfo-canary:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary
|
||||
Knative Revision and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger Starting canary deployment for podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement error rate 69.17% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 61.39% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 55.06% > 1%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement error rate 47.00% > 1%
|
||||
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement error rate 38.08% > 1%
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
@@ -59,7 +59,8 @@ kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
Deploy the load testing service to generate traffic during the analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create a canary custom resource:
|
||||
@@ -71,7 +72,6 @@ metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider can be: kubernetes, istio, appmesh, nginx, gloo
|
||||
provider: kubernetes
|
||||
# deployment reference
|
||||
targetRef:
|
||||
@@ -83,7 +83,7 @@ spec:
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -171,7 +171,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -311,7 +311,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
docs/gitbook/tutorials/kuma-progressive-delivery.md (new file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
# Kuma Canary Deployments
|
||||
|
||||
This guide shows you how to use Kuma and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and Kuma **1.7** or newer.
|
||||
|
||||
Install Kuma and Prometheus (part of Kuma Metrics):
|
||||
|
||||
```bash
|
||||
kumactl install control-plane | kubectl apply -f -
|
||||
kumactl install observability --components "grafana,prometheus" | kubectl apply -f -
|
||||
```
|
||||
|
||||
Install Flagger in the `kong-mesh-system` namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/fluxcd/flagger//kustomize/kuma
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services and Kuma `TrafficRoute`).
|
||||
These objects expose the application inside the mesh and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace and enable Kuma sidecar injection:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
kubectl annotate namespace test kuma.io/sidecar-injection=enabled
|
||||
```
|
||||
|
||||
Install the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Create a canary custom resource for the `podinfo` deployment:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
annotations:
|
||||
kuma.io/mesh: default
|
||||
spec:
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
port: 9898
|
||||
targetPort: 9898
|
||||
apex:
|
||||
annotations:
|
||||
9898.service.kuma.io/protocol: "http"
|
||||
canary:
|
||||
annotations:
|
||||
9898.service.kuma.io/protocol: "http"
|
||||
primary:
|
||||
annotations:
|
||||
9898.service.kuma.io/protocol: "http"
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as `podinfo-canary.yaml` and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
ingresses.extensions/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
trafficroutes.kuma.io/podinfo
|
||||
```
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
|
||||
|
||||

|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Waiting for podinfo.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
|
||||
* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
|
||||
* ConfigMaps mounted as volumes or mapped to environment variables
|
||||
* Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-06-30T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-06-30T16:15:07Z
|
||||
prod backend Failed 0 2019-06-30T17:05:07Z
|
||||
```
|
||||
|
||||
## Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulted version.
|
||||
|
||||
Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Starting canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 0.5s
|
||||
Halt podinfo.test advancement request duration 1.45s > 0.5s
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
The above procedures can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.
|
||||
@@ -2,25 +2,54 @@
|
||||
|
||||
This guide shows you how to use Linkerd and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.16** or newer and Linkerd **2.10** or newer.
|
||||
Flagger requires a Kubernetes cluster **v1.21** or newer and Linkerd **2.14** or newer.
|
||||
|
||||
Install Linkerd the Promethues (part of Linkerd Viz):
|
||||
Install Linkerd and Prometheus (part of Linkerd Viz):
|
||||
|
||||
```bash
|
||||
# The CRDs need to be installed beforehand
|
||||
linkerd install --crds | kubectl apply -f -
|
||||
|
||||
linkerd install | kubectl apply -f -
|
||||
linkerd viz install | kubectl apply -f -
|
||||
|
||||
# For linkerd versions 2.12 and later, the SMI extension needs to be installed in
# order to enable TrafficSplits
|
||||
curl -sL https://linkerd.github.io/linkerd-smi/install | sh
|
||||
linkerd smi install | kubectl apply -f -
|
||||
```
|
||||
|
||||
Install Flagger in the linkerd namespace:
|
||||
Install Flagger in the flagger-system namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/fluxcd/flagger//kustomize/linkerd
|
||||
```
|
||||
|
||||
If you prefer Helm, these are the commands to install Linkerd, Linkerd Viz,
|
||||
Linkerd-SMI and Flagger:
|
||||
|
||||
```bash
|
||||
helm repo add linkerd https://helm.linkerd.io/stable
|
||||
helm install linkerd-crds linkerd/linkerd-crds -n linkerd --create-namespace
|
||||
# See https://linkerd.io/2/tasks/generate-certificates/ for how to generate the
|
||||
# certs referred below
|
||||
helm install linkerd-control-plane linkerd/linkerd-control-plane \
|
||||
-n linkerd \
|
||||
--set-file identityTrustAnchorsPEM=ca.crt \
|
||||
--set-file identity.issuer.tls.crtPEM=issuer.crt \
|
||||
--set-file identity.issuer.tls.keyPEM=issuer.key \
|
||||
|
||||
helm install linkerd-viz linkerd/linkerd-viz -n linkerd-viz --create-namespace
|
||||
|
||||
helm install flagger flagger/flagger \
|
||||
-n flagger-system \
|
||||
--set meshProvider=gatewayapi:v1beta1 \
|
||||
--set metricsServer=http://prometheus.linkerd-viz:9090 \
|
||||
--set linkerdAuthPolicy.create=true
|
||||
```
|
||||
|
||||
## Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
@@ -46,9 +75,65 @@ Create a deployment and a horizontal pod autoscaler:
|
||||
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
|
||||
```
|
||||
|
||||
Create a canary custom resource for the podinfo deployment:
|
||||
Create a metrics template and canary custom resources for the podinfo deployment:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: success-rate
|
||||
namespace: test
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://prometheus.linkerd-viz:9090
|
||||
query: |
|
||||
sum(
|
||||
rate(
|
||||
response_total{
|
||||
namespace="{{ namespace }}",
|
||||
deployment=~"{{ target }}",
|
||||
classification!="failure",
|
||||
direction="{{ variables.direction }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
/
|
||||
sum(
|
||||
rate(
|
||||
response_total{
|
||||
namespace="{{ namespace }}",
|
||||
deployment=~"{{ target }}",
|
||||
direction="{{ variables.direction }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
)
|
||||
* 100
|
||||
---
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: MetricTemplate
|
||||
metadata:
|
||||
name: latency
|
||||
namespace: test
|
||||
spec:
|
||||
provider:
|
||||
type: prometheus
|
||||
address: http://prometheus.linkerd-viz:9090
|
||||
query: |
|
||||
histogram_quantile(
|
||||
0.99,
|
||||
sum(
|
||||
rate(
|
||||
response_latency_ms_bucket{
|
||||
namespace="{{ namespace }}",
|
||||
deployment=~"{{ target }}",
|
||||
direction="{{ variables.direction }}"
|
||||
}[{{ interval }}]
|
||||
)
|
||||
) by (le)
|
||||
)
|
||||
---
|
||||
apiVersion: flagger.app/v1beta1
|
||||
kind: Canary
|
||||
metadata:
|
||||
@@ -62,7 +147,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
@@ -73,6 +158,13 @@ spec:
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Reference to the Service that the generated HTTPRoute would attach to.
|
||||
gatewayRefs:
|
||||
- name: podinfo
|
||||
namespace: test
|
||||
group: core
|
||||
kind: Service
|
||||
port: 9898
|
||||
analysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
@@ -86,18 +178,28 @@ spec:
|
||||
stepWeight: 5
|
||||
# Linkerd Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
- name: success-rate
|
||||
templateRef:
|
||||
name: success-rate
|
||||
namespace: test
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
thresholdRange:
|
||||
min: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
templateVariables:
|
||||
direction: inbound
|
||||
- name: latency
|
||||
templateRef:
|
||||
name: latency
|
||||
namespace: test
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
thresholdRange:
|
||||
max: 500
|
||||
interval: 30s
|
||||
templateVariables:
|
||||
direction: inbound
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
@@ -140,7 +242,7 @@ service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
|
||||
|
||||
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||
@@ -152,7 +254,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -211,7 +313,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
@@ -297,7 +399,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
@@ -309,7 +411,7 @@ watch -n 1 curl http://podinfo-canary:9898/status/404
|
||||
Watch Flagger logs:
|
||||
|
||||
```text
|
||||
kubectl -n linkerd logs deployment/flagger -f | jq .msg
|
||||
kubectl -n flagger-system logs deployment/flagger -f | jq .msg
|
||||
|
||||
Starting canary deployment for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
@@ -390,7 +492,7 @@ spec:
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
@@ -442,7 +544,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.4
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B testing:
|
||||
|
||||
@@ -6,7 +6,7 @@ This guide shows you how to use the NGINX ingress controller and Flagger to auto
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and NGINX ingress **v0.46** or newer.
|
||||
Flagger requires a Kubernetes cluster **v1.19** or newer and NGINX ingress **v1.0.2** or newer.
|
||||
|
||||
Install the NGINX ingress controller with Helm v3:
|
||||
|
||||
@@ -110,7 +110,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
@@ -192,7 +192,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -246,7 +246,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.2
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
@@ -334,7 +334,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.3
|
||||
```
|
||||
|
||||
Generate high response latency:
|
||||
@@ -407,7 +407,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.4
|
||||
podinfod=ghcr.io/stefanprodan/podinfo:6.0.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B testing:
|
||||
|
||||
@@ -113,7 +113,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
|
||||
@@ -13,9 +13,17 @@ Install Traefik with Helm v3:
|
||||
```bash
|
||||
helm repo add traefik https://helm.traefik.io/traefik
|
||||
kubectl create ns traefik
|
||||
helm upgrade -i traefik traefik/traefik \
|
||||
--namespace traefik \
|
||||
--set additionalArguments="{--metrics.prometheus=true}"
|
||||
|
||||
cat <<EOF | helm upgrade -i traefik traefik/traefik --namespace traefik -f -
|
||||
deployment:
|
||||
podAnnotations:
|
||||
prometheus.io/port: "9100"
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/path: "/metrics"
|
||||
metrics:
|
||||
prometheus:
|
||||
entryPoint: metrics
|
||||
EOF
|
||||
```
|
||||
|
||||
Install Flagger and the Prometheus add-on in the same namespace as Traefik:
|
||||
@@ -55,7 +63,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
Create Traefik IngressRoute that references TraefikService generated by Flagger \(replace `app.example.com` with your own domain\):
|
||||
|
||||
```yaml
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
apiVersion: traefik.io/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: podinfo
|
||||
@@ -95,7 +103,7 @@ spec:
|
||||
name: podinfo
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta2
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
@@ -169,7 +177,7 @@ horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
traefikservice.traefik.containo.us/podinfo
|
||||
traefikservice.traefik.io/podinfo
|
||||
```
|
||||
|
||||
## Automated canary promotion
|
||||
|
||||

@@ -139,7 +139,7 @@ Note that without resource requests the horizontal pod autoscaler can't determin

A production environment should be able to handle traffic bursts without impacting the quality of service. This can be achieved with Kubernetes autoscaling capabilities. Autoscaling in Kubernetes has two dimensions: the Cluster Autoscaler that deals with node scaling operations and the Horizontal Pod Autoscaler that automatically scales the number of pods in a deployment.

```yaml
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
spec:
scaleTargetRef:

@@ -172,7 +172,7 @@ spec:
service:
port: 9898
gateways:
- public-gateway.istio-system.svc.cluster.local
- istio-system/public-gateway
hosts:
- app.example.com
retries:

@@ -20,9 +20,10 @@ Once the webhook has been generated. Flagger can be configured to send Slack not
```bash
helm upgrade -i flagger flagger/flagger \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.proxy-url=my-http-proxy.com \ # optional http/s proxy
--set slack.proxy=my-http-proxy.com \ # optional http/s proxy
--set slack.channel=general \
--set slack.user=flagger
--set slack.user=flagger \
--set clusterName=my-cluster
```

Once configured with a Slack incoming **webhook**,

@@ -36,6 +37,8 @@ or if the analysis reached the maximum number of failed checks:



To use a Slack bot token, add `token` to a secret and use **secretRef**.

### Microsoft Teams

Flagger can be configured to send notifications to Microsoft Teams:

@@ -72,6 +75,7 @@ spec:
channel: on-call-alerts
username: flagger
# webhook address (ignored if secretRef is specified)
# or https://slack.com/api/chat.postMessage if you use token in the secret
address: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
# optional http/s proxy
proxy: http://my-http-proxy.com

@@ -86,6 +90,7 @@ metadata:
namespace: flagger
data:
address: <encoded-url>
token: <encoded-token>
```

The alert provider **type** can be: `slack`, `msteams`, `rocket` or `discord`. When set to `discord`,

@@ -127,6 +132,9 @@ Alert fields:

When the severity is set to `warn`, Flagger will alert when waiting on manual confirmation or if the analysis fails.
When the severity is set to `error`, Flagger will alert only if the canary analysis fails.
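
For example, a per-canary alert that only fires on failed analysis could reference an alert provider like this (a minimal sketch based on the fields above; the provider name and namespace are assumptions):

```yaml
analysis:
  alerts:
    - name: "on-call Slack"
      # only alert when the canary analysis fails
      severity: error
      providerRef:
        name: on-call
        namespace: flagger
```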

To differentiate alerts based on the cluster name, you can configure Flagger with the `-cluster-name=my-cluster`
command flag, or with Helm `--set clusterName=my-cluster`.

## Prometheus Alert Manager

You can use Alertmanager to trigger alerts when a canary deployment fails:

@@ -3,13 +3,15 @@

Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:

* **Canary Release** \(progressive traffic shifting\)
* Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik
* Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik, Kuma, Gateway API, Apache APISIX, Knative
* **A/B Testing** \(HTTP headers and cookies traffic routing\)
* Istio, App Mesh, NGINX, Contour, Gloo Edge
* Istio, App Mesh, NGINX, Contour, Gloo Edge, Gateway API
* **Blue/Green** \(traffic switching\)
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge, Gateway API
* **Blue/Green Mirroring** \(traffic shadowing\)
* Istio
* Istio, Gateway API
* **Canary Release with Session Affinity** \(progressive traffic shifting combined with cookie based routing\)
* Istio, Gateway API

For Canary releases and A/B testing you'll need a Layer 7 traffic management solution like
a service mesh or an ingress controller. For Blue/Green deployments no service mesh or ingress controller is required.
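
As a minimal illustration of a mesh-less setup, a Blue/Green Canary can set the provider to plain Kubernetes and rely on iterations instead of traffic weights (a sketch only; names and values are illustrative):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # no service mesh or ingress controller required
  provider: kubernetes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
  analysis:
    # schedule interval (default 60s)
    interval: 1m
    # number of checks to run before promotion
    iterations: 10
    # max number of failed checks before rollback
    threshold: 2
```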

@@ -124,11 +126,11 @@ the step and the maximum weight value in 0 to 100 range.

Example:

```yaml
canary:
# canary.yaml
spec:
analysis:
promotion:
maxWeight: 50
stepWeight: 20
maxWeight: 50
stepWeight: 20
```

This configuration performs analysis starting from 20, increasing by 20 until weight goes above 50.

@@ -146,10 +148,10 @@ In order to enable non-linear promotion a new parameter was introduced:

Example:

```yaml
canary:
# canary.yaml
spec:
analysis:
promotion:
stepWeights: [1, 2, 10, 80]
stepWeights: [1, 2, 10, 80]
```

This configuration performs analysis starting from 1, going through `stepWeights` values till 80.

@@ -326,7 +328,7 @@ Blue/Green rollout steps for service mesh:
* run conformance tests for the canary pods
* run load tests and metric checks for the canary pods every minute
* abort the canary release if the failure threshold is reached
* route traffic to canary
* route traffic to canary (This doesn't happen when using the kubernetes provider)
* promote canary spec over primary (blue)
* wait for primary rollout
* route traffic to primary

@@ -351,8 +353,6 @@ you should consider what will happen if a write is duplicated and handled by the

To use mirroring, set `spec.analysis.mirror` to `true`.

Istio example:

```yaml
analysis:
# schedule interval (default 60s)

@@ -361,9 +361,10 @@ Istio example:
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# Traffic shadowing (compatible with Istio only)
# Traffic shadowing
mirror: true
# Weight of the traffic mirrored to your canary (defaults to 100%)
# Only applicable for Istio.
mirrorWeight: 100
```

@@ -393,3 +394,138 @@ After the analysis finishes, the traffic is routed to the canary (green) before
triggering the primary (blue) rolling update; this ensures a smooth transition
to the new version, avoiding dropped in-flight requests during the Kubernetes deployment rollout.

## Canary Release with Session Affinity

This deployment strategy mixes a Canary Release with A/B testing. A Canary Release is helpful when
we're trying to expose new features to users progressively, but because of the very nature of its
routing (weight based), users can land on the application's old version even after they have been
routed to the new version previously. This can be annoying, or worse, break how other services interact
with our application. To address this issue, we borrow some things from A/B testing.

Since A/B testing is particularly helpful for applications that require session affinity, we integrate
cookie based routing with regular weight based routing. This means once a user is exposed to the new
version of our application (based on the traffic weights), they're always routed to that version, i.e.
they're never routed back to the old version of our application.

You can enable this by specifying `.spec.analysis.sessionAffinity` in the Canary:

```yaml
analysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 2
# session affinity config
sessionAffinity:
# name of the cookie used
cookieName: flagger-cookie
# max age of the cookie (in seconds)
# optional; defaults to 86400
maxAge: 21600
```

`.spec.analysis.sessionAffinity.cookieName` is the name of the Cookie that is stored. The value of the
cookie is a randomly generated string of characters that acts as a unique identifier. For the above
config, the response header of a request routed to the canary deployment during a Canary run will look like:
```
Set-Cookie: flagger-cookie=LpsIaLdoNZ; Max-Age=21600
```

After a Canary run is over and all traffic is shifted back to the primary deployment, all responses will
have the following header:
```
Set-Cookie: flagger-cookie=LpsIaLdoNZ; Max-Age=-1
```
This tells the client to delete the cookie, making sure there are no junk cookies lying around in the user's
system.

If a new Canary run is triggered, the response header will set a new cookie for all requests routed to
the Canary deployment:
```
Set-Cookie: flagger-cookie=McxKdLQoIN; Max-Age=21600
```

### Configuring stickiness for Primary deployment

The above strategy is helpful because it makes sure that any user that's routed to the Canary deployment
once is always routed to that deployment. But this can result in an imbalance in the traffic shifting,
as over time, most of the traffic flows to the Canary deployment. To ensure fair traffic distribution, we
can also configure stickiness for the Primary deployment. You can configure this by specifying a
`primaryCookieName` field:

```yaml
analysis:
# schedule interval (default 60s)
interval: 1m
sessionAffinity:
# name of the cookie used
cookieName: flagger-cookie
# max age of the cookie (in seconds)
# optional; defaults to 86400
maxAge: 21600
# name of the cookie to use for the primary backend
# optional; unset means no primary stickiness
primaryCookieName: primary-flagger-cookie
```

> Note: This is only supported for the Gateway API provider for now.

Let's understand what the above configuration does. All the session affinity behavior described in the section above
still applies, but now the response header for requests routed to the primary deployment also includes a
`Set-Cookie` header:
```
Set-Cookie: primary-flagger-cookie=ApvLdqCoMF; Max-Age=60
```

Note that the age of the cookie is the same as the Canary analysis's interval. This means that the cookie
expires when a new step of the analysis begins and a new cookie is generated like so:
```
Set-Cookie: primary-flagger-cookie=BRtlVaQoPC; Max-Age=60
```

This ensures that, if the first request of a user during a particular step is routed to the primary deployment,
then all subsequent requests will be routed to the same deployment until the next step starts. During a new step, a new cookie
value is generated which is then included in the headers of responses from the primary workload. This allows for
weighted traffic routing to happen while ensuring that users don't ever switch back to the primary deployment from
the canary deployment during a Canary analysis.

### Configuring additional cookie attributes

Depending on your use case, you may need to set additional [cookie attributes](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#attributes) in order for your application to route requests correctly.
You may set the following attributes:

```yaml
analysis:
# schedule interval (default 60s)
interval: 1m
sessionAffinity:
# name of the cookie used
cookieName: flagger-cookie
# max age of the cookie (in seconds)
# optional; defaults to 86400
maxAge: 21600
# defines the host to which the cookie will be sent.
# optional
domain: fluxcd.io
# forbids JavaScript from accessing the cookie, for example, through the Document.cookie property.
# optional
httpOnly: true
# indicates that the cookie should be stored using partitioned storage.
# optional
partitioned: true
# indicates the path that must exist in the requested URL for the browser to send the Cookie header.
# optional
path: /flagger
# controls whether or not a cookie is sent with cross-site requests.
# optional; valid values are Strict, Lax or None
sameSite: Strict
# indicates that the cookie is sent to the server only when a request is made with the https: scheme (except on localhost)
# optional
secure: true
```

@@ -65,9 +65,12 @@ spec:
kind: Deployment
name: podinfo
autoscalerRef:
apiVersion: autoscaling/v2beta2
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
name: podinfo
primaryScalerReplicas:
minReplicas: 2
maxReplicas: 5
```

Based on the above configuration, Flagger generates the following Kubernetes objects:

@@ -80,6 +83,11 @@ by default all traffic is routed to this version and the target deployment is sc

Flagger will detect changes to the target deployment (including secrets and configmaps)
and will perform a canary analysis before promoting the new version as primary.

Use `.spec.autoscalerRef.primaryScalerReplicas` to override the replica scaling
configuration for the generated primary HorizontalPodAutoscaler. This is useful
for situations when you want to have a different scaling configuration for the
primary workload as opposed to using the same values from the original workload HorizontalPodAutoscaler.

**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:

```yaml

@@ -115,6 +123,12 @@ but disabling config-tracking using the per Secret/ConfigMap annotation may fit

The autoscaler reference is optional; when specified,
Flagger will pause the traffic increase while the target and primary deployments are scaled up or down.
HPA can help reduce the resource usage during the canary analysis.
When the autoscaler reference is specified, any changes made to the autoscaler are only made active
in the primary autoscaler when a rollout for the deployment starts and completes successfully.
Optionally, you can create two HPAs, one for the canary and one for the primary, to update the HPA without
doing a new rollout. As the canary deployment will be scaled to 0, the HPA on the canary will be inactive.

**Note** Flagger requires `autoscaling/v2` or `autoscaling/v2beta2` API version for HPAs.

The progress deadline represents the maximum time in seconds for the canary deployment to
make progress before it is rolled back; it defaults to ten minutes.

@@ -130,14 +144,19 @@ spec:
name: podinfo
port: 9898
portName: http
appProtocol: http
targetPort: 9898
portDiscovery: true
headless: false
trafficDistribution: PreferClose
```

The container port from the target workload should match the `service.port` or `service.targetPort`.
The `service.name` is optional and defaults to `spec.targetRef.name`.
The `service.targetPort` can be a container port number or name.
The `service.portName` is optional (defaults to `http`); if your workload uses gRPC, set the port name to `grpc`.
The `service.appProtocol` is optional, more details can be found [here](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol).
The `service.trafficDistribution` is optional, more details can be found [here](https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution).

If port discovery is enabled, Flagger scans the target workload and extracts the container ports
excluding the port specified in the canary service and service mesh sidecar ports.

@@ -184,6 +203,17 @@ spec:
test: "test"
```

Note that the `apex` annotations are added to both the generated Kubernetes Service and the
generated service mesh/ingress object. This allows using external-dns with Istio `VirtualServices`
and `TraefikServices`. Beware of configuration conflicts [here](../faq.md#ExternalDNS).

Note that if any annotations or labels are added that are not specified here,
Flagger will remove them during reconciliation. To specify metadata
that should be ignored by Flagger, configure `unmanagedMetadata`.

If you want the generated Kubernetes ClusterIP services to be [headless](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services),
then set `service.headless` to true.

Besides port mapping and metadata, the service specification can
contain URI match and rewrite rules, timeout and retry policies:

@@ -331,6 +361,14 @@ Spec:
# total number of iterations
# used for A/B Testing and Blue/Green
iterations:
# threshold of primary pods that need to be available to consider it ready
# before starting rollout. this is optional and the default is 100
# percentage (0-100)
primaryReadyThreshold: 100
# threshold of canary pods that need to be available to consider it ready
# before starting rollout. this is optional and the default is 100
# percentage (0-100)
canaryReadyThreshold: 100
# canary match conditions
# used for A/B Testing
match:

@@ -351,3 +389,10 @@ On each run, Flagger calls the webhooks, checks the metrics and if the failed ch
stops the analysis and rolls back the canary.
If alerting is configured, Flagger will post the analysis result using the alert providers.

## Canary suspend

The `suspend` field can be set to true to suspend the Canary. If a Canary is suspended,
its reconciliation is completely paused. This means that changes to target workloads,
tracked ConfigMaps and Secrets don't trigger a Canary run and changes to resources generated
by Flagger are not corrected. If the Canary was suspended during an active Canary run,
then the run is paused without disturbing the workloads or the traffic weights.
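
A minimal sketch of suspending a Canary (only the `suspend` field matters here; the rest mirrors the examples above):

```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # pause all reconciliation for this Canary
  suspend: true
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    port: 9898
  analysis:
    interval: 1m
    threshold: 10
    maxWeight: 50
    stepWeight: 5
```

Removing the field or setting it back to `false` lets reconciliation resume.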

@@ -29,7 +29,7 @@ Flagger comes with two builtin metric checks: HTTP request success rate and dura

For each metric you can specify a range of accepted values with `thresholdRange` and
the window size or the time series with `interval`.
The builtin checks are available for every service mesh / ingress controlle
The builtin checks are available for every service mesh / ingress controller
and are implemented with [Prometheus queries](../faq.md#metrics).
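
For instance, the two builtin checks can be bounded like this (a sketch; the threshold values are illustrative):

```yaml
analysis:
  metrics:
    - name: request-success-rate
      # minimum request success rate (non 5xx responses)
      # percentage (0-100)
      thresholdRange:
        min: 99
      interval: 1m
    - name: request-duration
      # maximum request duration P99, in milliseconds
      thresholdRange:
        max: 500
      interval: 30s
```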

## Custom metrics

@@ -46,8 +46,9 @@ metadata:
name: my-metric
spec:
provider:
type: # can be prometheus or datadog
type: # can be prometheus, datadog, etc
address: # API URL
insecureSkipVerify: # if set to true, disables the TLS cert validation
secretRef:
name: # name of the secret containing the API credentials
query: # metric query

@@ -61,6 +62,7 @@ The following variables are available in query templates:
* `service` (canary.spec.service.name)
* `ingress` (canary.spec.ingressRef.name)
* `interval` (canary.spec.analysis.metrics[].interval)
* `variables` (canary.spec.analysis.metrics[].templateVariables)

A canary analysis metric can reference a template with `templateRef`:

@@ -81,6 +83,50 @@ A canary analysis metric can reference a template with `templateRef`:
interval: 1m
```

A canary analysis metric can reference a set of custom variables with `templateVariables`. These variables will then be injected into the query defined in the referred `MetricTemplate` object during canary analysis:

```yaml
analysis:
metrics:
- name: "my metric"
templateRef:
name: my-metric
namespace: flagger
# accepted values
thresholdRange:
min: 10
max: 1000
# metric query time window
interval: 1m
# custom variables used within the referenced metric template
templateVariables:
direction: inbound
```

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: my-metric
spec:
provider:
type: prometheus
address: http://prometheus.linkerd-viz:9090
query: |
histogram_quantile(
0.99,
sum(
rate(
response_latency_ms_bucket{
namespace="{{ namespace }}",
deployment=~"{{ target }}",
direction="{{ variables.direction }}"
}[{{ interval }}]
)
) by (le)
)
```

## Prometheus

You can create custom metric checks targeting a Prometheus server by

@@ -183,13 +229,25 @@ as the `MetricTemplate` with the basic-auth credentials:
apiVersion: v1
kind: Secret
metadata:
name: prom-basic-auth
name: prom-auth
namespace: flagger
data:
username: your-user
password: your-password
```

or if you require bearer token authentication (via a SA token):

```yaml
apiVersion: v1
kind: Secret
metadata:
name: prom-auth
namespace: flagger
data:
token: ey1234...
```

Then reference the secret in the `MetricTemplate`:

```yaml

@@ -203,7 +261,7 @@ spec:
type: prometheus
address: http://prometheus.monitoring:9090
secretRef:
name: prom-basic-auth
name: prom-auth
```

## Datadog

@@ -476,3 +534,250 @@ spec:
secretRef:
name: graphite-basic-auth
```

## Google Cloud Monitoring (Stackdriver)

Enable Workload Identity on your cluster, create a service account key that has read access to the
Cloud Monitoring API and then create an IAM policy binding between the GCP service account and the Flagger
service account on Kubernetes. You can take a look at this [guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).

Annotate the Flagger service account:
```shell script
kubectl annotate serviceaccount flagger \
--namespace <namespace> \
iam.gke.io/gcp-service-account=<gcp-serviceaccount-name>@<project-id>.iam.gserviceaccount.com
```

Alternatively, you can download the JSON key and add it to your secret with the key `serviceAccountKey` (This method is not recommended).

Create a secret that contains your project-id (and, if workload identity is not enabled on your cluster,
your [service account json](https://cloud.google.com/docs/authentication/production#create_service_account)).

```
kubectl create secret generic gcloud-sa --from-literal=project=<project-id>
```
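
If you go the key-file route mentioned above, the same secret could also carry the key under `serviceAccountKey` (a sketch; the local file name is assumed):

```
kubectl create secret generic gcloud-sa \
  --from-literal=project=<project-id> \
  --from-file=serviceAccountKey=<path-to-key>.json
```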

Then reference the secret in the metric template.
Note: The particular MQL query used here works if [Istio is installed on GKE](https://cloud.google.com/istio/docs/istio-on-gke/installing).
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: bytes-sent
namespace: test
spec:
provider:
type: stackdriver
secretRef:
name: gcloud-sa
query: |
fetch k8s_container
| metric 'istio.io/service/server/response_latencies'
| filter
(metric.destination_service_name == '{{ service }}-canary'
&& metric.destination_service_namespace == '{{ namespace }}')
| align delta(1m)
| every 1m
| group_by [],
[value_response_latencies_percentile:
percentile(value.response_latencies, 99)]
```

The reference for the query language can be found [here](https://cloud.google.com/monitoring/mql/reference).

## InfluxDB

The InfluxDB provider uses the [flux](https://docs.influxdata.com/influxdb/v2.0/query-data/get-started/) query language.

Create a secret that contains your authentication token that can be found in the InfluxDB UI.

```
kubectl create secret generic influx-token --from-literal=token=<token>
```

Then reference the secret in the metric template.

Note: The particular Flux query used here assumes Istio request metrics (`istio_requests_total`) are being written to InfluxDB.

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: not-found
namespace: test
spec:
provider:
type: influxdb
secretRef:
name: influx-token
query: |
from(bucket: "default")
|> range(start: -2h)
|> filter(fn: (r) => r["_measurement"] == "istio_requests_total")
|> filter(fn: (r) => r["destination_workload_namespace"] == "{{ namespace }}")
|> filter(fn: (r) => r["destination_workload"] == "{{ target }}")
|> filter(fn: (r) => r["response_code"] == "500")
|> count()
|> yield(name: "count")
```

## Dynatrace

You can create custom metric checks using the Dynatrace provider.

Create a secret with your Dynatrace token:

```yaml
apiVersion: v1
kind: Secret
metadata:
name: dynatrace
namespace: istio-system
data:
dynatrace_token: ZHQwYz...
```

Dynatrace metric template example:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: response-time-95pct
namespace: istio-system
spec:
provider:
type: dynatrace
address: https://xxxxxxxx.live.dynatrace.com
secretRef:
name: dynatrace
query: |
builtin:service.response.time:filter(eq(dt.entity.service,SERVICE-ABCDEFG0123456789)):percentile(95)
```

Reference the template in the canary analysis:

```yaml
analysis:
metrics:
- name: "response-time-95pct"
templateRef:
name: response-time-95pct
namespace: istio-system
thresholdRange:
max: 1000
interval: 1m
```

## Keptn

You can create custom metric checks using the Keptn provider.
This provider allows you to verify either the value of a single [KeptnMetric](https://keptn.sh/stable/docs/reference/crd-reference/metric/),
representing the value of a single metric,
or of a [Keptn Analysis](https://keptn.sh/stable/docs/reference/crd-reference/analysis/),
which provides a flexible grading logic for analysing and prioritising a number of different
metric values coming from different data sources.

This provider requires [Keptn](https://keptn.sh/stable/docs/installation/) to be installed in the cluster.

Example for a Keptn metric template:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: response-time
namespace: istio-system
spec:
provider:
type: keptn
query: keptnmetric/my-namespace/response-time/2m/reporter=destination
```

This will reference the `KeptnMetric` with the name `response-time` in
the namespace `my-namespace`, which could look like the following:

```yaml
apiVersion: metrics.keptn.sh/v1beta1
kind: KeptnMetric
metadata:
name: response-time
namespace: my-namespace
spec:
fetchIntervalSeconds: 10
provider:
name: my-prometheus-keptn-provider
query: histogram_quantile(0.8, sum by(le) (rate(http_server_request_latency_seconds_bucket{status_code='200',
job='simple-go-backend'}[5m])))
```

The `query` contains the following components, which are divided by `/` characters:

```
<type>/<namespace>/<resource-name>/<timeframe>/<arguments>
```

* **type (required)**: Must be either `keptnmetric` or `analysis`.
* **namespace (required)**: The namespace of the referenced `KeptnMetric`/`AnalysisDefinition`.
* **resource-name (required)**: The name of the referenced `KeptnMetric`/`AnalysisDefinition`.
* **timeframe (optional)**: The timeframe used for the Analysis.
  This will usually be set to the same value as the analysis interval of a `Canary`.
  Only relevant if the `type` is set to `analysis`.
* **arguments (optional)**: Arguments to be passed to an `Analysis`.
  Arguments are passed as a list of key value pairs, separated by `;` characters,
  e.g. `foo=bar;bar=foo`.
  Only relevant if the `type` is set to `analysis`.

For the type `analysis`, the value returned by the provider is either `0`
(if the analysis failed), or `1` (analysis passed).
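
Putting the components together, an `analysis` query with arguments might look like this (a sketch; the namespace, `AnalysisDefinition` name and arguments are hypothetical):

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: response-time-analysis
  namespace: istio-system
spec:
  provider:
    type: keptn
    # analysis/<namespace>/<AnalysisDefinition>/<timeframe>/<arguments>
    query: analysis/my-namespace/my-analysis-definition/2m/foo=bar;bar=foo
```

Since the provider returns `0` or `1`, a `thresholdRange` with `min: 1` on the canary metric makes the check pass only when the Keptn Analysis itself passes.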

## Splunk

You can create custom metric checks using the Splunk provider.

Create a secret that contains your authentication token that can be found in the Splunk o11y UI.

```yaml
apiVersion: v1
kind: Secret
metadata:
name: splunk
namespace: istio-system
data:
sf_token_key: your-access-token
```

Splunk template example:

```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: success-rate
namespace: istio-system
spec:
provider:
type: splunk
address: https://api.<REALM>.signalfx.com
secretRef:
name: splunk
query: |
total = data('traces.count', filter=filter('sf_service', '{{target}}')).sum().publish(enable=False)
success = data('traces.count', filter=filter('sf_service', '{{target}}') and filter('sf_error', 'false')).sum().publish(enable=False)
((success/total) * 100).publish()
```

The query format documentation can be found [here](https://dev.splunk.com/observability/docs/signalflow).

Reference the template in the canary analysis:

```yaml
analysis:
metrics:
- name: "success rate"
templateRef:
name: success-rate
namespace: istio-system
thresholdRange:
max: 99
interval: 1m
```

@@ -117,4 +117,14 @@ flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"}
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6

# Last canary metric analysis result per different metrics
flagger_canary_metric_analysis{metric="podinfo-http-successful-rate",name="podinfo",namespace="test"} 1
flagger_canary_metric_analysis{metric="podinfo-custom-metric",name="podinfo",namespace="test"} 0.918223108974359

# Canary successes total counter
flagger_canary_successes_total{name="podinfo",namespace="test",deployment_strategy="canary",analysis_status="completed"} 5

# Canary failures total counter
flagger_canary_failures_total{name="podinfo",namespace="test",deployment_strategy="canary",analysis_status="completed"} 1
```
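
As a sketch of how these counters can feed alerting (the rule name and threshold are illustrative), a Prometheus alerting rule could watch for failed canaries:

```yaml
groups:
  - name: flagger
    rules:
      - alert: canary_rollback
        # fires when a canary analysis has failed within the last 10 minutes
        expr: increase(flagger_canary_failures_total[10m]) > 0
        labels:
          severity: warning
        annotations:
          summary: "Canary analysis failed for {{ $labels.name }} in {{ $labels.namespace }}"
```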

@@ -41,6 +41,7 @@ Spec:
- name: "start gate"
type: confirm-rollout
url: http://flagger-loadtester.test/gate/approve
retries: 5
- name: "helm test"
type: pre-rollout
url: http://flagger-helmtester.flagger/

@@ -72,6 +73,7 @@ Spec:
- name: "send to Slack"
type: event
url: http://event-recevier.notifications/slack
retries: 3
metadata:
environment: "test"
cluster: "flagger-test"

@@ -83,16 +85,19 @@ Webhook payload (HTTP POST):

```javascript
{
"name": "podinfo",
"namespace": "test",
"phase": "Progressing",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
"name": "podinfo",
"namespace": "test",
"phase": "Progressing",
"checksum": "85d557f47b",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
}
```

The checksum field is hashed from the TrackedConfigs and LastAppliedSpec of the Canary; it can be used to identify a Canary for a specific configuration of the deployed resources.

Response status codes:

* 200-202 - advance canary by increasing the traffic weight

@@ -107,6 +112,7 @@ Event payload (HTTP POST):
"name": "string (canary name)",
"namespace": "string (canary namespace)",
"phase": "string (canary phase)",
"checksum": "string (canary checksum)",
"metadata": {
"eventMessage": "string (canary event message)",
"eventType": "string (canary event type)",

@@ -118,6 +124,11 @@ Event payload (HTTP POST):

The event receiver can create alerts based on the received phase
(possible values: `Initialized`, `Waiting`, `Progressing`, `Promoting`, `Finalising`, `Succeeded` or `Failed`).

Options:
* retries: The webhook request can be retried by specifying a positive integer in the `retries` field. This helps ensure reliability if the webhook fails due to transient network issues.
* disable TLS: Set `disableTLS` to `true` in the webhook spec to bypass TLS verification. This is useful in cases where the target service uses self-signed certificates, or you need to connect to an insecure service for testing purposes.
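
A sketch of both options on a single webhook (the URL and values are illustrative):

```yaml
analysis:
  webhooks:
    - name: "load test"
      type: rollout
      url: https://loadtester.example/
      timeout: 15s
      # retry the request up to 3 times on transient failures
      retries: 3
      # skip TLS verification, e.g. for self-signed certificates
      disableTLS: true
```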

## Load Testing

For workloads that are not receiving constant traffic, Flagger can be configured with a webhook,

@@ -143,7 +154,8 @@ helm repo add flagger https://flagger.app

helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.timeout=1h
--set cmd.timeout=1h \
--set cmd.namespaceRegexp=''
```

When deployed, the load tester API will be available at `http://flagger-loadtester.test/`.

@@ -253,6 +265,42 @@ to the nGrinder server and start a new performance test. the load tester will pe
poll the nGrinder server for the status of the test,
and prevent duplicate requests from being sent in subsequent analysis loops.

### K6 Load Tester

You can also delegate load testing to a third-party webhook. An example of this is the [`k6 webhook`](https://github.com/grafana/flagger-k6-webhook). This webhook uses [`k6`](https://k6.io/), a very featureful load tester, to run load or smoke tests on canaries. For all features available, see the source repository.

Here's an example integrating this webhook as a `pre-rollout` step, to load test a service before any traffic is sent to it:

```yaml
webhooks:
- name: k6-load-test
timeout: 5m
type: pre-rollout
url: http://k6-loadtester.flagger/launch-test
metadata:
script: |
import http from 'k6/http';
import { sleep } from 'k6';
export const options = {
vus: 2,
duration: '30s',
thresholds: {
http_req_duration: ['p(95)<50']
},
ext: {
loadimpact: {
name: '<cluster>/<your_service>',
projectID: <project id>,
},
},
};

export default function () {
http.get('http://<your_service>-canary.<namespace>:80/');
sleep(0.10);
}
```

## Integration Testing

Flagger comes with a testing service that can run Helm tests, Bats tests or Concord tests when configured as a webhook.

@@ -350,6 +398,22 @@ This can be done via mounting a Kubernetes secret in the tester's Deployment.
to see if the process has finished (Default is 5s). `pollTimeout` represents the time in seconds
the webhook will try to call Concord before timing out (Default is 30s).

If you need to start a Pod/Job to run tests, you can do so using `kubectl`.

```yaml
analysis:
webhooks:
- name: "smoke test"
type: pre-rollout
url: http://flagger-kubectltester.kube-system/
timeout: 3m
metadata:
type: "kubectl"
cmd: "run test --image=alpine --overrides='{ "spec": { "serviceAccount": "default:default" } }'"
```
Note that you need to setup RBAC for the load tester service account in order to run `kubectl` and `helm` commands.
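
For example, giving the load tester permission to run `kubectl run` style smoke tests in its own namespace might look roughly like this (a sketch only; the service account name, namespace and verbs are assumptions that depend on your installation and on what the test commands actually do):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: loadtester-smoke-tests
  namespace: test
rules:
  # allow the tester to create and inspect test pods
  - apiGroups: [""]
    resources: ["pods", "pods/log"]
    verbs: ["create", "get", "list", "watch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: loadtester-smoke-tests
  namespace: test
subjects:
  - kind: ServiceAccount
    name: flagger-loadtester
    namespace: test
roleRef:
  kind: Role
  name: loadtester-smoke-tests
  apiGroup: rbac.authorization.k8s.io
```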

## Manual Gating

For manual approval of a canary deployment, you can use the `confirm-rollout` and `confirm-promotion` webhooks.