Compare commits
816 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 4463d41744 |  |
|  | 66663c2aaf |  |
|  | d34a504516 |  |
|  | 9fc5f9ca31 |  |
|  | 3cf9fe7cb7 |  |
|  | 6d57b5c490 |  |
|  | 7254a5a7f6 |  |
|  | 671016834d |  |
|  | 41dc056c87 |  |
|  | 6757553685 |  |
|  | 1abceb8334 |  |
|  | 0939a1bcc9 |  |
|  | d91012d79c |  |
|  | 6e9fe2c584 |  |
|  | ccad05c92f |  |
|  | 8b9929008b |  |
|  | a881446252 |  |
|  | 183c73f2c4 |  |
|  | 3e3eaebbf2 |  |
|  | bcc5bbf6fa |  |
|  | 42c3052247 |  |
|  | 3593e20ad5 |  |
|  | 1f0819465b |  |
|  | 7e48d7537f |  |
|  | 45c69ce3f7 |  |
|  | 8699a07de9 |  |
|  | fb92d31744 |  |
|  | bb7c4b1909 |  |
|  | 84d74d1eb5 |  |
|  | ddab72cd59 |  |
|  | 87d0b33327 |  |
|  | 225a9015bb |  |
|  | c0b60b1497 |  |
|  | 0463c19825 |  |
|  | 8e70aa90c1 |  |
|  | 0a418eb88a |  |
|  | 040dbb8d03 |  |
|  | 64f2288bdd |  |
|  | 8008562a33 |  |
|  | a39652724d |  |
|  | 691c3c4f36 |  |
|  | f6fa5e3891 |  |
|  | a305a0b705 |  |
|  | dfe619e2ea |  |
|  | 2b3d425b70 |  |
|  | 6e55fea413 |  |
|  | b6a08b6615 |  |
|  | eaa6906516 |  |
|  | 62a7a92f2a |  |
|  | 3aeb0945c5 |  |
|  | e8c85efeae |  |
|  | 6651f6452b |  |
|  | 0ca48d77be |  |
|  | a9e0e018e3 |  |
|  | 122d11f445 |  |
|  | b03555858c |  |
|  | dcc5a40441 |  |
|  | 8c949f59de |  |
|  | e8d91a0375 |  |
|  | fae9aa664d |  |
|  | c31e9e5a96 |  |
|  | 99fff98274 |  |
|  | 11d84bf35d |  |
|  | e56ba480c7 |  |
|  | b9f0517c5d |  |
|  | 6e66f02585 |  |
|  | 5922e96044 |  |
|  | f36e7e414a |  |
|  | 606754d4a5 |  |
|  | a3847e64df |  |
|  | 7a3f9f2e73 |  |
|  | 2e4e8b0bf9 |  |
|  | 951fe80115 |  |
|  | c0a8149acb |  |
|  | 80b75b227d |  |
|  | dff7de09f2 |  |
|  | b3bbadfccf |  |
|  | fc676e3cb7 |  |
|  | 860c82dff9 |  |
|  | 4829f5af7f |  |
|  | c463b6b231 |  |
|  | b2ca0c4c16 |  |
|  | 69875cb3dc |  |
|  | 9e33a116d4 |  |
|  | dab3d53b65 |  |
|  | e3f8bff6fc |  |
|  | 0648d81d34 |  |
|  | ece5c4401e |  |
|  | bfc64c7cf1 |  |
|  | 0a2c134ece |  |
|  | 8bea9253c3 |  |
|  | e1dacc3983 |  |
|  | 0c6a7355e7 |  |
|  | 83046282c3 |  |
|  | 65c9817295 |  |
|  | e4905d3d35 |  |
|  | 6bc0670a7a |  |
|  | 95ff6adc19 |  |
|  | 7ee51c7def |  |
|  | dfa065b745 |  |
|  | e3b03debde |  |
|  | ef759305cb |  |
|  | ad65497d4e |  |
|  | 163f5292b0 |  |
|  | e07a82d024 |  |
|  | 046245a8b5 |  |
|  | aa6a180bcc |  |
|  | c4d28e14fc |  |
|  | bc4bdcdc1c |  |
|  | be22ff9951 |  |
|  | f204fe53f4 |  |
|  | 28e7e89047 |  |
|  | 75d49304f3 |  |
|  | 04cbacb6e0 |  |
|  | c46c7b9e21 |  |
|  | 919dafa567 |  |
|  | dfdcfed26e |  |
|  | a0a4d4cfc5 |  |
|  | 970a589fd3 |  |
|  | 56d2c0952a |  |
|  | 4871be0345 |  |
|  | e3e112e279 |  |
|  | d2cbd40d89 |  |
|  | 3786a49f00 |  |
|  | ff4aa62061 |  |
|  | 9b6cfdeef7 |  |
|  | 9d89e0c83f |  |
|  | 559cbd0d36 |  |
|  | caea00e47f |  |
|  | b26542f38d |  |
|  | bbab7ce855 |  |
|  | afa2d079f6 |  |
|  | 108bf9ca65 |  |
|  | 438f952128 |  |
|  | 3e84799644 |  |
|  | d6e80bac7f |  |
|  | 9b3b24bddf |  |
|  | 5c831ae482 |  |
|  | 78233fafd3 |  |
|  | 73c3e07859 |  |
|  | 10c61daee4 |  |
|  | b1bb9fa114 |  |
|  | a7f4b6d2ae |  |
|  | b937c4ea8d |  |
|  | e577311b64 |  |
|  | b847345308 |  |
|  | 85e683446f |  |
|  | 4f49aa5760 |  |
|  | 8ca9cf24bb |  |
|  | 61d0216c21 |  |
|  | ba4a2406ba |  |
|  | c2974416b4 |  |
|  | 48fac4e876 |  |
|  | f0add9a67c |  |
|  | 20f9df01c2 |  |
|  | 514e850072 |  |
|  | 61fe78a982 |  |
|  | c4b066c845 |  |
|  | d24a23f3bd |  |
|  | 22045982e2 |  |
|  | f496f1e18f |  |
|  | 2e802432c4 |  |
|  | a2f747e16f |  |
|  | 982338e162 |  |
|  | 03fe4775dd |  |
|  | def7d9bde0 |  |
|  | a58a7cbeeb |  |
|  | 82ca66c23b |  |
|  | 92c971c0d7 |  |
|  | 30c4faf72b |  |
|  | 85ee7d17cf |  |
|  | a6d278ae91 |  |
|  | ad8d02f701 |  |
|  | 00fa5542f7 |  |
|  | 9ed2719d19 |  |
|  | 8a809baf35 |  |
|  | ff90c42fa7 |  |
|  | d651e8fe48 |  |
|  | bc613905e9 |  |
|  | e3321118e5 |  |
|  | 31f526cbd6 |  |
|  | 493554178f |  |
|  | 004b1cc7dd |  |
|  | 767602592c |  |
|  | 34676acaf5 |  |
|  | 491ab7affa |  |
|  | b522bbd903 |  |
|  | dd3bc28806 |  |
|  | 764e7e275d |  |
|  | 931c051153 |  |
|  | 3da86fe118 |  |
|  | 93f37a3022 |  |
|  | 77b3d861e6 |  |
|  | ce0e16ffe8 |  |
|  | fb9709ae78 |  |
|  | 191c3868ab |  |
|  | d076f0859e |  |
|  | df24ba86d0 |  |
|  | 3996bcfa67 |  |
|  | 9e8a4ad384 |  |
|  | 26ee668612 |  |
|  | e3c102e7f8 |  |
|  | ba60b127ea |  |
|  | 74c69dc07e |  |
|  | 0687d89178 |  |
|  | 7a454c005f |  |
|  | 2ce4f3a93e |  |
|  | 7baaaebdd4 |  |
|  | 608c7f7a31 |  |
|  | 1a0daa8678 |  |
|  | ed0d25af97 |  |
|  | 720d04aba1 |  |
|  | 901648393a |  |
|  | b5acd817fc |  |
|  | 2586fc6ef0 |  |
|  | 62e0eb6395 |  |
|  | 768b0490e2 |  |
|  | 852454fa2c |  |
|  | 970b67d6f6 |  |
|  | ea0eddff82 |  |
|  | 0d4d2ac37b |  |
|  | d0591916a4 |  |
|  | 6a8aef8675 |  |
|  | a894a7a0ce |  |
|  | 0bbe724b8c |  |
|  | bea22c0259 |  |
|  | 6363580120 |  |
|  | cbdc7ef2d3 |  |
|  | 0959406609 |  |
|  | cf41f9a478 |  |
|  | 6fe6a41e3e |  |
|  | 91cd2648d9 |  |
|  | 240591a6b8 |  |
|  | 2973822113 |  |
|  | a6b2b1246c |  |
|  | c74456411d |  |
|  | 31b3fcf906 |  |
|  | 767be5b6a8 |  |
|  | 48834cd8d1 |  |
|  | f4bb0ea9c2 |  |
|  | cf25a9a8a5 |  |
|  | 4f0ad7a067 |  |
|  | c0fe461a9f |  |
|  | 1911143514 |  |
|  | 9b67b360d0 |  |
|  | 991e01efd2 |  |
|  | 83b8ae46c9 |  |
|  | c3b7aee063 |  |
|  | 66d662c085 |  |
|  | 4d5876fb76 |  |
|  | 7ca2558a81 |  |
|  | 8957994c1a |  |
|  | 0147aea69b |  |
|  | b5f73d66ec |  |
|  | 6800181594 |  |
|  | 6f5f80a085 |  |
|  | fd23a2f98f |  |
|  | 63cb8a5ba5 |  |
|  | 4a9e3182c6 |  |
|  | 5cbc3df7b5 |  |
|  | dcadc2303f |  |
|  | cf5f364ed2 |  |
|  | e45ace5d9b |  |
|  | 6e7421b0d8 |  |
|  | 647d02890f |  |
|  | 7e72d23b60 |  |
|  | 9fada306f0 |  |
|  | 8d1cc83405 |  |
|  | 1979bc59d0 |  |
|  | bf7ebc9708 |  |
|  | dc3cde88d2 |  |
|  | 98beb1011e |  |
|  | 8c59e9d2b4 |  |
|  | 9a87d47f45 |  |
|  | f25023ed1b |  |
|  | 806b233d58 |  |
|  | 677ee8d639 |  |
|  | 61ac8d7a8c |  |
|  | 278680b248 |  |
|  | 5e4a58a1c1 |  |
|  | 757b5ca22e |  |
|  | 6d1da5bb45 |  |
|  | 9ca79d147d |  |
|  | 37fcfe15bb |  |
|  | a9c7466359 |  |
|  | 91a3f2c9a7 |  |
|  | 9aa341d088 |  |
|  | c9e09fa8eb |  |
|  | e6257b7531 |  |
|  | aee027c91c |  |
|  | c106796751 |  |
|  | 42bd600482 |  |
|  | 47ad81be5b |  |
|  | 88c450e3bd |  |
|  | 2ebedd185c |  |
|  | 0fdbef4cda |  |
|  | 68500dc579 |  |
|  | 12a29f1939 |  |
|  | 9974968dee |  |
|  | f2eaa91c9c |  |
|  | f117f72901 |  |
|  | 5424126d3c |  |
|  | 028933b635 |  |
|  | 678f79fc61 |  |
|  | 933c19fdf4 |  |
|  | d678c59285 |  |
|  | 2285bd210e |  |
|  | cba6e5f811 |  |
|  | 3fa9f37192 |  |
|  | c243756802 |  |
|  | 27b1b882ea |  |
|  | 2505cbfe15 |  |
|  | 396452b7b6 |  |
|  | 76c82f48a4 |  |
|  | 948226dd4e |  |
|  | 1c97fc86c9 |  |
|  | 00de7abfde |  |
|  | 631d93b8d9 |  |
|  | 2e38dbc565 |  |
|  | b122f7f71a |  |
|  | 6101557000 |  |
|  | cdc66128a9 |  |
|  | eace3713ce |  |
|  | fd50c4b4b7 |  |
|  | 62a5f8c5d6 |  |
|  | 093cb24602 |  |
|  | 4f63f7f9e4 |  |
|  | 9f359327f0 |  |
|  | 2bc8194d96 |  |
|  | 181d50b7b6 |  |
|  | 3ae995f55c |  |
|  | fbb37ad5e4 |  |
|  | 5cc3b905b4 |  |
|  | abb8d946cc |  |
|  | 797316fc4d |  |
|  | beed6369a0 |  |
|  | 9618d2ea0d |  |
|  | 94e5bfc031 |  |
|  | bb620ad94a |  |
|  | 7c6d1c48a3 |  |
|  | bd5d884c8b |  |
|  | 1c06721c9a |  |
|  | 1e29e2c4eb |  |
|  | 88c39d7379 |  |
|  | da43a152ba |  |
|  | ec63aa9999 |  |
|  | 7b9df746ad |  |
|  | 52d93ddda2 |  |
|  | eb0331f2bf |  |
|  | 6a66a87a44 |  |
|  | f3cc810948 |  |
|  | 12d84b2e24 |  |
|  | 58bde24ece |  |
|  | 5b3fd0efca |  |
|  | ee6e39afa6 |  |
|  | 677b9d9197 |  |
|  | 786c5aa93a |  |
|  | fd44f1fabf |  |
|  | b20e0178e1 |  |
|  | 5a490abfdd |  |
|  | 674c79da94 |  |
|  | 23ebb4235d |  |
|  | b2500d0ccb |  |
|  | ee500d83ac |  |
|  | 0032c14a78 |  |
|  | 8fd3e927b8 |  |
|  | 1902884b56 |  |
|  | 98d2805267 |  |
|  | 24a74d3589 |  |
|  | 7fe273a21d |  |
|  | bd817cc520 |  |
|  | eb856fda13 |  |
|  | d63f05c92e |  |
|  | 8fde6bdb8a |  |
|  | 8148120421 |  |
|  | 95b8840bf2 |  |
|  | 0e8b1ef20f |  |
|  | 0fbf4dcdb2 |  |
|  | 7aca9468ac |  |
|  | a6c0f08fcc |  |
|  | 9c1bcc08bb |  |
|  | 87e9dfe3d3 |  |
|  | d7be66743e |  |
|  | 15463456ec |  |
|  | 752eceed4b |  |
|  | eadce34d6f |  |
|  | 11ccf34bbc |  |
|  | e308678ed5 |  |
|  | cbe72f0aa2 |  |
|  | bc84e1c154 |  |
|  | 344bd45a0e |  |
|  | 72014f736f |  |
|  | 0a2949b6ad |  |
|  | 2ff695ecfe |  |
|  | 8d0b54e059 |  |
|  | 121a65fad0 |  |
|  | ecaa203091 |  |
|  | 6d0e3c6468 |  |
|  | c933476fff |  |
|  | 1335210cf5 |  |
|  | 9d12794600 |  |
|  | d57fc7d03e |  |
|  | 1f9f6fb55a |  |
|  | 948df55de3 |  |
|  | 8914f26754 |  |
|  | 79b3370892 |  |
|  | a233b99f0b |  |
|  | 0d94c01678 |  |
|  | 00151e92fe |  |
|  | f7db0210ea |  |
|  | cf3ba35fb9 |  |
|  | 177dc824e3 |  |
|  | 5f544b90d6 |  |
|  | 921ac00383 |  |
|  | 7df7218978 |  |
|  | e4c6903a01 |  |
|  | 027342dc72 |  |
|  | e17a747785 |  |
|  | e477b37bd0 |  |
|  | ad25068375 |  |
|  | c92230c109 |  |
|  | 9e082d9ee3 |  |
|  | cfd610ac55 |  |
|  | 82067f13bf |  |
|  | 242d79e49d |  |
|  | 4f01ecde5a |  |
|  | 61141c7479 |  |
|  | 62429ff710 |  |
|  | 82a1f45cc1 |  |
|  | 1a95fc2a9c |  |
|  | 13816eeafa |  |
|  | 5279f73c17 |  |
|  | d196bb2856 |  |
|  | 3f8f634a1b |  |
|  | 350efb2bfe |  |
|  | 5ba27c898e |  |
|  | 57f1b63fa1 |  |
|  | d69e203479 |  |
|  | 4d7fae39a8 |  |
|  | 2dc554c92a |  |
|  | 21c394ef7f |  |
|  | 2173bfc1a0 |  |
|  | a19d016e14 |  |
|  | 8f1b5df9e2 |  |
|  | 2d6b8ecfdf |  |
|  | 8093612011 |  |
|  | 39dc761e32 |  |
|  | 0c68983c62 |  |
|  | c7539f6e4b |  |
|  | 8cebc0acee |  |
|  | f60c4d60cf |  |
|  | 662f9cba2e |  |
|  | 4a82e1e223 |  |
|  | b60b912bf8 |  |
|  | 093348bc60 |  |
|  | 37ebbf14f9 |  |
|  | 156488c8d5 |  |
|  | 68d1f583cc |  |
|  | 3492b07d9a |  |
|  | d0b582048f |  |
|  | a82eb7b01f |  |
|  | cd08afcbeb |  |
|  | 331942a4ed |  |
|  | aa24d6ff7e |  |
|  | 58c2c19f1e |  |
|  | 2a91149211 |  |
|  | 868482c240 |  |
|  | 4e387fa943 |  |
|  | 15484363d6 |  |
|  | 50b7b74480 |  |
|  | adb53c63dd |  |
|  | bdc3a32e96 |  |
|  | 65f716182b |  |
|  | 6ef72e2550 |  |
|  | 60f51ad7d5 |  |
|  | a09dc2cbd8 |  |
|  | 825d07aa54 |  |
|  | f46882c778 |  |
|  | 663fa08cc1 |  |
|  | 19e625d38e |  |
|  | edcff9cd15 |  |
|  | e0fc5ecb39 |  |
|  | 4ac6629969 |  |
|  | 68d8dad7c8 |  |
|  | 4ab9ceafc1 |  |
|  | 352ed898d4 |  |
|  | e091d6a50d |  |
|  | c651ef00c9 |  |
|  | 4b17788a77 |  |
|  | e5612bca50 |  |
|  | d21fb1afe8 |  |
|  | 89d0a533e2 |  |
|  | db673dddd9 |  |
|  | 88ad457e87 |  |
|  | 126b68559e |  |
|  | 2cd3fe47e6 |  |
|  | 15eb7cce55 |  |
|  | 13f923aabf |  |
|  | 0ffb112063 |  |
|  | b4ea6af110 |  |
|  | 611c8f7374 |  |
|  | 1cc73f37e7 |  |
|  | ca37fc0eb5 |  |
|  | 5380624da9 |  |
|  | aaece0bd44 |  |
|  | de7cc17f5d |  |
|  | 66efa39d27 |  |
|  | ff7c0a105d |  |
|  | 7b29253df4 |  |
|  | 7ef63b341e |  |
|  | e7bfaa4f1a |  |
|  | 3a9a408941 |  |
|  | 3e43963daa |  |
|  | 69a6e260f5 |  |
|  | 664e7ad555 |  |
|  | ee4a009a06 |  |
|  | 36dfd4dd35 |  |
|  | dbf36082b2 |  |
|  | 3a1018cff6 |  |
|  | fc10745a1a |  |
|  | 347cfd06de |  |
|  | ec759ce467 |  |
|  | f211e0fe31 |  |
|  | c91a128b65 |  |
|  | 6a080f3032 |  |
|  | b2c12c1131 |  |
|  | b945b37089 |  |
|  | 9a5529a0aa |  |
|  | 025785389d |  |
|  | 48d9a0dede |  |
|  | fbdf38e990 |  |
|  | ef5bf70386 |  |
|  | 274c1469b4 |  |
|  | 960d506360 |  |
|  | 79a6421178 |  |
|  | 8b5c004860 |  |
|  | f54768772e |  |
|  | b9075dc6f9 |  |
|  | 107596ad54 |  |
|  | 3c6a2b1508 |  |
|  | f996cba354 |  |
|  | bdd864fbdd |  |
|  | ca074ef13f |  |
|  | ddd3a8251e |  |
|  | f5b862dc1b |  |
|  | d45d475f61 |  |
|  | d0f72ea3fa |  |
|  | 5ed5d1e5b6 |  |
|  | 311b14026e |  |
|  | 67cd722b54 |  |
|  | 7f6247eb7b |  |
|  | f3fd515521 |  |
|  | 9db5dd0d7f |  |
|  | d07925d79d |  |
|  | 662d0f31ff |  |
|  | 2c5ad0bf8f |  |
|  | 0ea76b986a |  |
|  | 3c4253c336 |  |
|  | 77ba28e91c |  |
|  | 6399e7586c |  |
|  | 1caa62adc8 |  |
|  | 8fa558f124 |  |
|  | 191228633b |  |
|  | 8a981f935a |  |
|  | a8ea9adbcc |  |
|  | 685d94c44b |  |
|  | 153ed1b044 |  |
|  | 7788f3a1ba |  |
|  | cd99225f9b |  |
|  | 33ba3b8d4a |  |
|  | d222dd1069 |  |
|  | 39fd3d46ba |  |
|  | 419c1804b6 |  |
|  | ae0351ddad |  |
|  | 941be15762 |  |
|  | 578ebcf6ed |  |
|  | 27ab4b08f9 |  |
|  | 428b2208ba |  |
|  | 438c553d60 |  |
|  | 90cb293182 |  |
|  | 1f9f93ebe4 |  |
|  | f5b97fbb74 |  |
|  | ce79244126 |  |
|  | 3af5d767d8 |  |
|  | 3ce3efd2f2 |  |
|  | 8108edea31 |  |
|  | 5d80087ab3 |  |
|  | 6593be584d |  |
|  | a0f63f858f |  |
|  | 49914f3bd5 |  |
|  | 71988a8b98 |  |
|  | d65be6ef58 |  |
|  | 38c40d02e7 |  |
|  | 9e071b9d60 |  |
|  | b4ae060122 |  |
|  | 436656e81b |  |
|  | d7e111b7d4 |  |
|  | 4b6126dd1a |  |
|  | 89faa70196 |  |
|  | 6ed9d4a1db |  |
|  | 9d0e38c2e1 |  |
|  | 8b758fd616 |  |
|  | 14369d8be3 |  |
|  | 7b4153113e |  |
|  | 7d340c5e61 |  |
|  | 337c94376d |  |
|  | 0ef1d0b2f1 |  |
|  | 5cf67bd4e0 |  |
|  | f22be17852 |  |
|  | 48e79d5dd4 |  |
|  | 59f5a0654a |  |
|  | 6da2e11683 |  |
|  | 802c087a4b |  |
|  | ed2048e9f3 |  |
|  | 437b1d30c0 |  |
|  | ba1788cbc5 |  |
|  | 773094a20d |  |
|  | 5aa39106a0 |  |
|  | a9167801ba |  |
|  | 62f4a6cb96 |  |
|  | ea2b41e96e |  |
|  | d28ce650e9 |  |
|  | 1bfcdba499 |  |
|  | e48faa9144 |  |
|  | 33fbe99561 |  |
|  | 989925b484 |  |
|  | 7dd66559e7 |  |
|  | 2ef1c5608e |  |
|  | b5932e8905 |  |
|  | 37999d3250 |  |
|  | 83985ae482 |  |
|  | 3adfcc837e |  |
|  | c720fee3ab |  |
|  | 881387e522 |  |
|  | d9f3378e29 |  |
|  | ba87620225 |  |
|  | 1cd0c49872 |  |
|  | 12ac96deeb |  |
|  | 17e6f35785 |  |
|  | bd115633a3 |  |
|  | 86ea172380 |  |
|  | d87bbbbc1e |  |
|  | 6196f69f4d |  |
|  | be31bcf22f |  |
|  | cba2135c69 |  |
|  | 2e52573499 |  |
|  | b2ce1ed1fb |  |
|  | 77a485af74 |  |
|  | d8b847a973 |  |
|  | e80a3d3232 |  |
|  | 780ba82385 |  |
|  | 6ba69dce0a |  |
|  | 3c7a561db8 |  |
|  | 49c942bea0 |  |
|  | bf1ca293dc |  |
|  | 62b906d30b |  |
|  | 65bf048189 |  |
|  | a498ed8200 |  |
|  | 9f12bbcd98 |  |
|  | fcd520787d |  |
|  | e2417e4e40 |  |
|  | 70a2cbf1c6 |  |
|  | fa0c6af6aa |  |
|  | 4f1abd0c8d |  |
|  | 41e839aa36 |  |
|  | 2fd1593ad2 |  |
|  | 27b601c5aa |  |
|  | 5fc69134e3 |  |
|  | 9adc0698bb |  |
|  | 119c2ff464 |  |
|  | f3a4201c7d |  |
|  | 8b6aa73df0 |  |
|  | 1d4dfb0883 |  |
|  | eab7f126a6 |  |
|  | fe7547d83e |  |
|  | 7d0df82861 |  |
|  | 7f0cd27591 |  |
|  | e094c2ae14 |  |
|  | a5d438257f |  |
|  | d8cb8f1064 |  |
|  | a8d8bb2d6f |  |
|  | a76ea5917c |  |
|  | b0b6198ec8 |  |
|  | eda97f35d2 |  |
|  | 2b6507d35a |  |
|  | f7c4d5aa0b |  |
|  | 74f07cffa6 |  |
|  | 79c8ff0af8 |  |
|  | ac544eea4b |  |
|  | 231a32331b |  |
|  | 104e8ef050 |  |
|  | 296015faff |  |
|  | 9a9964c968 |  |
|  | 0d05d86e32 |  |
|  | 9680ca98f2 |  |
|  | 42b850ca52 |  |
|  | 3f5c22d863 |  |
|  | 535a92e871 |  |
|  | 3411a6a981 |  |
|  | b5adee271c |  |
|  | e2abcd1323 |  |
|  | 25fbe7ecb6 |  |
|  | 6befee79c2 |  |
|  | f09c5a60f1 |  |
|  | 52e89ff509 |  |
|  | 35e20406ef |  |
|  | c6e96ff1bb |  |
|  | 793ab524b0 |  |
|  | 5a479d0187 |  |
|  | a23e4f1d2a |  |
|  | bd35a3f61c |  |
|  | 197e987d5f |  |
|  | 7f29beb639 |  |
|  | 1140af8dc7 |  |
|  | a2688c3910 |  |
|  | 75b27ab3f3 |  |
|  | 59d3f55fb2 |  |
|  | f34739f334 |  |
|  | 90c71ec18f |  |
|  | 395234d7c8 |  |
|  | e322ba0065 |  |
|  | 6db8b96f72 |  |
|  | 44d7e96e96 |  |
|  | 1662479c8d |  |
|  | 2e351fcf0d |  |
|  | 5d81876d07 |  |
|  | c81e6989ec |  |
|  | 4d61a896c3 |  |
|  | d148933ab3 |  |
|  | 04a56a3591 |  |
|  | 4a354e74d4 |  |
|  | 1e3e6427d5 |  |
|  | 38826108c8 |  |
|  | 4c4752f907 |  |
|  | 94dcd6c94d |  |
|  | eabef3db30 |  |
|  | 6750f10ffa |  |
|  | 56cb888cbf |  |
|  | b3e7fb3417 |  |
|  | 2c6e1baca2 |  |
|  | c8358929d1 |  |
|  | 1dc7677dfb |  |
|  | 8e699a7543 |  |
|  | cbbabdfac0 |  |
|  | 9d92de234c |  |
|  | ba65975fb5 |  |
|  | ef423b2078 |  |
|  | f451b4e36c |  |
|  | 0856e13ee6 |  |
|  | 87b9fa8ca7 |  |
|  | 5b43d3d314 |  |
|  | ac4972dd8d |  |
|  | 8a8f68af5d |  |
|  | c669dc0c4b |  |
|  | 863a5466cc |  |
|  | e2347c84e3 |  |
|  | e0e673f565 |  |
|  | 30cbf2a741 |  |
|  | f58de3801c |  |
|  | 7c6b88d4c1 |  |
|  | 0c0ebaecd5 |  |
|  | 1925f99118 |  |
|  | 6f2a22a1cc |  |
|  | ee04082cd7 |  |
|  | efd901ac3a |  |
|  | e565789ae8 |  |
|  | d3953004f6 |  |
|  | df1d9e3011 |  |
|  | 631c55fa6e |  |
|  | 29cdd43288 |  |
|  | 9b79af9fcd |  |
|  | 2c9c1adb47 |  |
|  | 5dfb5808c4 |  |
|  | bb0175aebf |  |
|  | adaf4c99c0 |  |
|  | bed6ed09d5 |  |
|  | 4ff67a85ce |  |
|  | 702f4fcd14 |  |
|  | 8a03ae153d |  |
|  | 434c6149ab |  |
|  | 97fc4a90ae |  |
|  | 217ef06930 |  |
|  | 71057946e6 |  |
|  | a74ad52c72 |  |
|  | 12d26874f8 |  |
|  | 27de9ce151 |  |
|  | 9e7cd5a8c5 |  |
|  | 38cb487b64 |  |
|  | 05ca266c5e |  |
|  | 5cc26de645 |  |
|  | 2b9a195fa3 |  |
|  | 4454749eec |  |
|  | b435a03fab |  |
|  | 7c166e2b40 |  |
|  | f7a7963dcf |  |
|  | 9c77c0d69c |  |
|  | e8a9555346 |  |
|  | 59751dd007 |  |
|  | 9c4d4d16b6 |  |
|  | 0e3d1b3e8f |  |
|  | f119b78940 |  |
|  | 456d914c35 |  |
|  | 737507b0fe |  |
|  | 4bcf82d295 |  |
|  | e9cd7afc8a |  |
|  | 0830abd51d |  |
|  | 5b296e01b3 |  |
|  | 3fd039afd1 |  |
|  | 5904348ba5 |  |
|  | 1a98e93723 |  |
|  | c9685fbd13 |  |
|  | dc347e273d |  |
|  | 8170916897 |  |
|  | 71cd4e0cb7 |  |
|  | 0109788ccc |  |
|  | 1649dea468 |  |
@@ -1,8 +0,0 @@

```yaml
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 50
        base: auto
    patch: off
```
@@ -1 +0,0 @@

```yaml
root: ./docs/gitbook
```
`.github/main.workflow` (vendored)
@@ -1,17 +0,0 @@

```hcl
workflow "Publish Helm charts" {
  on = "push"
  resolves = ["helm-push"]
}

action "helm-lint" {
  uses = "stefanprodan/gh-actions/helm@master"
  args = ["lint charts/*"]
}

action "helm-push" {
  needs = ["helm-lint"]
  uses = "stefanprodan/gh-actions/helm-gh-pages@master"
  args = ["charts/*","https://flagger.app"]
  secrets = ["GITHUB_TOKEN"]
}
```
`.github/workflows/website.yaml` (vendored, new file)
@@ -0,0 +1,53 @@

```yaml
name: website

on:
  push:
    branches:
      - website

jobs:
  vuepress:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - uses: actions/setup-node@v5
        with:
          node-version: '12.x'
      - id: yarn-cache-dir-path
        run: echo "::set-output name=dir::$(yarn cache dir)"
      - uses: actions/cache@v4
        id: yarn-cache
        with:
          path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-yarn-
      - name: Build
        run: |
          yarn add -D vuepress
          yarn docs:build
          mkdir $HOME/site
          rsync -avzh docs/.vuepress/dist/ $HOME/site
      - name: Publish
        env:
          REPOSITORY: "https://x-access-token:${{ secrets.BOT_GITHUB_TOKEN }}@github.com/fluxcd/flagger.git"
        run: |
          tmpDir=$(mktemp -d)
          pushd $tmpDir >& /dev/null

          git clone ${REPOSITORY}
          cd flagger
          git config user.name ${{ github.actor }}
          git config user.email ${{ github.actor }}@users.noreply.github.com
          git remote set-url origin ${REPOSITORY}
          git checkout gh-pages
          rm -rf assets
          rsync -avzh $HOME/site/ .
          rm -rf node_modules
          git add .
          git commit -m "Publish website"
          git push origin gh-pages

          popd >& /dev/null
          rm -rf $tmpDir
```
`.gitignore` (vendored)
@@ -1,13 +1,65 @@

```gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Test binary, build with `go test -c`
*.test
# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# TypeScript v1 declaration files
typings/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# next.js build output
.next

bin/
docs/.vuepress/dist/
Makefile.dev
```
@@ -1,14 +0,0 @@

```yaml
builds:
  - main: ./cmd/flagger
    binary: flagger
    ldflags: -s -w -X github.com/stefanprodan/flagger/pkg/version.REVISION={{.Commit}}
    goos:
      - linux
    goarch:
      - amd64
    env:
      - CGO_ENABLED=0
archive:
  name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
  files:
    - none*
```
`.travis.yml`
@@ -1,46 +0,0 @@

```yaml
sudo: required
language: go

go:
  - 1.11.x

services:
  - docker

addons:
  apt:
    packages:
      - docker-ce

script:
  - set -e
  - make test-fmt
  - make test-codegen
  - go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
  - make build

after_success:
  - if [ -z "$DOCKER_USER" ]; then
      echo "PR build, skipping image push";
    else
      BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
      docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
    fi
  - if [ -z "$TRAVIS_TAG" ]; then
      echo "Not a release, skipping image push";
    else
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
      docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
    fi
  - bash <(curl -s https://codecov.io/bash)
  - rm coverage.txt

deploy:
  - provider: script
    skip_cleanup: true
    script: curl -sL http://git.io/goreleaser | bash
    on:
      tags: true
```
@@ -1,72 +0,0 @@

```markdown
# How to Contribute

Flagger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.

We gratefully welcome improvements to documentation as well as to code.

## Certificate of Origin

By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.

## Chat

The project uses Slack: To join the conversation, simply join the
[Weave community](https://slack.weave.works/) Slack workspace.

## Getting Started

- Fork the repository on GitHub
- If you want to contribute as a developer, continue reading this document for further instructions
- If you have questions, concerns, get stuck or need a hand, let us know
  on the Slack channel. We are happy to help and look forward to having
  you part of the team. No matter in which capacity.
- Play with the project, submit bugs, submit pull requests!

## Contribution workflow

This is a rough outline of how to prepare a contribution:

- Create a topic branch from where you want to base your work (usually branched from master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- If you changed code:
  - add automated tests to cover your changes
- Submit a pull request to the original repository.

## Acceptance policy

These things will make a PR more likely to be accepted:

- a well-described requirement
- new code and tests follow the conventions in old code and tests
- a good commit message (see below)
- All code must abide [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
- Names should abide [What's in a name](https://talks.golang.org/2014/names.slide#1)
- Code must build on both Linux and Darwin, via plain `go build`
- Code should have appropriate test coverage and tests should be written
  to work with `go test`

In general, we will merge a PR once one maintainer has endorsed it.
For substantial changes, more people may become involved, and you might
get asked to resubmit the PR or divide the changes into more than one PR.

### Format of the Commit Message

For Flux we prefer the following rules for good commit messages:

- Limit the subject to 50 characters and write as the continuation
  of the sentence "If applied, this commit will ..."
- Explain what and why in the body, if more than a trivial change;
  wrap it at 72 characters.

The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.

This doc is adapted from the [Weaveworks Flux](https://github.com/weaveworks/flux/blob/master/CONTRIBUTING.md)
```
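For illustration, a commit message that follows the rules above might look like this (a hypothetical sample, not taken from the repository history):

```
Add retry to canary webhook calls

Webhook targets can be briefly unavailable during analysis; retrying
avoids failing a canary run on a transient network error.
```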
`Dockerfile`
@@ -1,29 +0,0 @@

```dockerfile
FROM golang:1.11

RUN mkdir -p /go/src/github.com/stefanprodan/flagger/

WORKDIR /go/src/github.com/stefanprodan/flagger

COPY . .

RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
    CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
    -X github.com/stefanprodan/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
    -a -installsuffix cgo -o flagger ./cmd/flagger/*

FROM alpine:3.8

RUN addgroup -S flagger \
    && adduser -S -g flagger flagger \
    && apk --no-cache add ca-certificates

WORKDIR /home/flagger

COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .

RUN chown -R flagger:flagger ./

USER flagger

ENTRYPOINT ["./flagger"]
```
@@ -1,44 +0,0 @@

```dockerfile
FROM golang:1.11 AS hey-builder

RUN mkdir -p /go/src/github.com/rakyll/hey/

WORKDIR /go/src/github.com/rakyll/hey

ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .

RUN tar xzf v0.1.1.tar.gz --strip 1

RUN go get ./...

RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
    go install -ldflags '-w -extldflags "-static"' \
    /go/src/github.com/rakyll/hey

FROM golang:1.11 AS builder

RUN mkdir -p /go/src/github.com/stefanprodan/flagger/

WORKDIR /go/src/github.com/stefanprodan/flagger

COPY . .

RUN go test -race ./pkg/loadtester/

RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*

FROM alpine:3.8

RUN addgroup -S app \
    && adduser -S -g app app \
    && apk --no-cache add ca-certificates curl

WORKDIR /home/app

COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .

RUN chown -R app:app ./

USER app

ENTRYPOINT ["./loadtester"]
```
`Gopkg.lock` (generated)
@@ -1,737 +0,0 @@

```toml
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
  name = "cloud.google.com/go"
  packages = ["compute/metadata"]
  pruneopts = "NUT"
  revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
  version = "v0.28.0"

[[projects]]
  branch = "master"
  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  pruneopts = "NUT"
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  pruneopts = "NUT"
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
  name = "github.com/ghodss/yaml"
  packages = ["."]
  pruneopts = "NUT"
  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
  version = "v1.0.0"

[[projects]]
  digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
  name = "github.com/gogo/protobuf"
  packages = [
    "proto",
    "sortkeys",
  ]
  pruneopts = "NUT"
  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
  version = "v1.1.1"

[[projects]]
  branch = "master"
  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
  name = "github.com/golang/glog"
  packages = ["."]
  pruneopts = "NUT"
  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
  source = "github.com/istio/glog"

[[projects]]
  branch = "master"
  digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
  name = "github.com/golang/groupcache"
  packages = ["lru"]
  pruneopts = "NUT"
  revision = "24b0969c4cb722950103eed87108c8d291a8df00"

[[projects]]
  digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
  name = "github.com/golang/protobuf"
  packages = [
    "proto",
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
    "ptypes/timestamp",
  ]
  pruneopts = "NUT"
  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
  name = "github.com/google/btree"
  packages = ["."]
  pruneopts = "NUT"
  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"

[[projects]]
  digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
  name = "github.com/google/go-cmp"
  packages = [
    "cmp",
    "cmp/cmpopts",
    "cmp/internal/diff",
    "cmp/internal/function",
    "cmp/internal/value",
  ]
  pruneopts = "NUT"
  revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
  version = "v0.2.0"

[[projects]]
  branch = "master"
  digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
  name = "github.com/google/gofuzz"
  packages = ["."]
  pruneopts = "NUT"
  revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"

[[projects]]
  digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
  name = "github.com/googleapis/gnostic"
  packages = [
    "OpenAPIv2",
    "compiler",
    "extensions",
  ]
  pruneopts = "NUT"
  revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
  version = "v0.2.0"

[[projects]]
  branch = "master"
  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
  name = "github.com/gregjones/httpcache"
  packages = [
    ".",
    "diskcache",
  ]
  pruneopts = "NUT"
  revision = "9cad4c3443a7200dd6400aef47183728de563a38"

[[projects]]
  digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
  name = "github.com/hashicorp/golang-lru"
  packages = [
    ".",
    "simplelru",
  ]
  pruneopts = "NUT"
  revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
  version = "v0.5.0"

[[projects]]
  digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
  name = "github.com/imdario/mergo"
  packages = ["."]
  pruneopts = "NUT"
  revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
  version = "v0.3.6"

[[projects]]
  branch = "master"
  digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
  name = "github.com/istio/glog"
  packages = ["."]
  pruneopts = "NUT"
  revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"

[[projects]]
  digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
  name = "github.com/json-iterator/go"
  packages = ["."]
  pruneopts = "NUT"
  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"

[[projects]]
  digest = "1:03a74b0d86021c8269b52b7c908eb9bb3852ff590b363dad0a807cf58cec2f89"
  name = "github.com/knative/pkg"
  packages = [
    "apis",
    "apis/duck",
    "apis/duck/v1alpha1",
    "apis/istio",
    "apis/istio/authentication",
    "apis/istio/authentication/v1alpha1",
    "apis/istio/common/v1alpha1",
    "apis/istio/v1alpha3",
    "client/clientset/versioned",
    "client/clientset/versioned/fake",
    "client/clientset/versioned/scheme",
    "client/clientset/versioned/typed/authentication/v1alpha1",
    "client/clientset/versioned/typed/authentication/v1alpha1/fake",
    "client/clientset/versioned/typed/duck/v1alpha1",
    "client/clientset/versioned/typed/duck/v1alpha1/fake",
    "client/clientset/versioned/typed/istio/v1alpha3",
    "client/clientset/versioned/typed/istio/v1alpha3/fake",
    "signals",
  ]
  pruneopts = "NUT"
  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"

[[projects]]
  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  pruneopts = "NUT"
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
  digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
  name = "github.com/modern-go/concurrent"
  packages = ["."]
  pruneopts = "NUT"
  revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
  version = "1.0.3"

[[projects]]
  digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
  name = "github.com/modern-go/reflect2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
  version = "1.0.1"

[[projects]]
  branch = "master"
  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
  name = "github.com/petar/GoLLRB"
  packages = ["llrb"]
  pruneopts = "NUT"
  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"

[[projects]]
  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
  name = "github.com/peterbourgon/diskv"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
  version = "v2.0.1"

[[projects]]
  digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
    "prometheus/promhttp",
  ]
  pruneopts = "NUT"
  revision = "c5b7fccd204277076155f10851dad72b76a49317"
  version = "v0.8.0"

[[projects]]
  branch = "master"
  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  pruneopts = "NUT"
  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"

[[projects]]
  branch = "master"
  digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "model",
  ]
  pruneopts = "NUT"
  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"

[[projects]]
  branch = "master"
  digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs",
  ]
  pruneopts = "NUT"
  revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"

[[projects]]
  digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = "NUT"
  revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
  version = "v1.0.2"

[[projects]]
  digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
  name = "go.uber.org/atomic"
  packages = ["."]
  pruneopts = "NUT"
  revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
  version = "v1.3.2"

[[projects]]
  digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e"
  name = "go.uber.org/multierr"
  packages = ["."]
  pruneopts = "NUT"
  revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
  version = "v1.1.0"

[[projects]]
  digest = "1:85674ac609b704fd4e9f463553b6ffc3a3527a993ae0ba550eb56beaabdfe094"
  name = "go.uber.org/zap"
  packages = [
    ".",
    "buffer",
    "internal/bufferpool",
    "internal/color",
    "internal/exit",
    "zapcore",
  ]
  pruneopts = "NUT"
  revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
  version = "v1.9.1"

[[projects]]
  branch = "master"
  digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  pruneopts = "NUT"
  revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"

[[projects]]
  branch = "master"
  digest = "1:1400b8e87c2c9bd486ea1a13155f59f8f02d385761206df05c0b7db007a53b2c"
  name = "golang.org/x/net"
  packages = [
    "context",
    "context/ctxhttp",
    "http/httpguts",
    "http2",
    "http2/hpack",
    "idna",
  ]
  pruneopts = "NUT"
  revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"

[[projects]]
  branch = "master"
  digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
  name = "golang.org/x/oauth2"
  packages = [
    ".",
    "google",
    "internal",
    "jws",
    "jwt",
  ]
  pruneopts = "NUT"
  revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"

[[projects]]
  branch = "master"
  digest = "1:44261e94b6095310a2df925fd68632d399a00eb153b52566a7b3697f7c70638c"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows",
  ]
  pruneopts = "NUT"
  revision = "1561086e645b2809fb9f8a1e2a38160bf8d53bf4"

[[projects]]
  digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm",
    "unicode/rangetable",
  ]
  pruneopts = "NUT"
  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
  version = "v0.3.0"

[[projects]]
  branch = "master"
  digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
  name = "golang.org/x/time"
  packages = ["rate"]
  pruneopts = "NUT"
  revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"

[[projects]]
  branch = "master"
  digest = "1:45751dc3302c90ea55913674261b2d74286b05cdd8e3ae9606e02e4e77f4353f"
  name = "golang.org/x/tools"
  packages = [
    "go/ast/astutil",
    "imports",
    "internal/fastwalk",
  ]
  pruneopts = "NUT"
  revision = "90fa682c2a6e6a37b3a1364ce2fe1d5e41af9d6d"

[[projects]]
  digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
  name = "google.golang.org/appengine"
  packages = [
    ".",
    "internal",
    "internal/app_identity",
    "internal/base",
    "internal/datastore",
    "internal/log",
    "internal/modules",
    "internal/remote_api",
    "internal/urlfetch",
    "urlfetch",
  ]
  pruneopts = "NUT"
  revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
  version = "v1.2.0"

[[projects]]
  digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
  name = "gopkg.in/inf.v0"
  packages = ["."]
  pruneopts = "NUT"
  revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
  version = "v0.9.1"

[[projects]]
  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[[projects]]
  digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
  name = "k8s.io/api"
  packages = [
    "admissionregistration/v1alpha1",
    "admissionregistration/v1beta1",
    "apps/v1",
    "apps/v1beta1",
    "apps/v1beta2",
    "authentication/v1",
    "authentication/v1beta1",
    "authorization/v1",
    "authorization/v1beta1",
    "autoscaling/v1",
    "autoscaling/v2beta1",
    "batch/v1",
    "batch/v1beta1",
    "batch/v2alpha1",
    "certificates/v1beta1",
    "core/v1",
    "events/v1beta1",
    "extensions/v1beta1",
    "networking/v1",
    "policy/v1beta1",
    "rbac/v1",
    "rbac/v1alpha1",
    "rbac/v1beta1",
    "scheduling/v1alpha1",
    "scheduling/v1beta1",
    "settings/v1alpha1",
    "storage/v1",
    "storage/v1alpha1",
    "storage/v1beta1",
  ]
  pruneopts = "NUT"
  revision = "072894a440bdee3a891dea811fe42902311cd2a3"
  version = "kubernetes-1.11.0"

[[projects]]
  digest = "1:4b0d523ee389c762d02febbcfa0734c4530ebe87abe925db18f05422adcb33e8"
  name = "k8s.io/apimachinery"
  packages = [
    "pkg/api/equality",
    "pkg/api/errors",
    "pkg/api/meta",
    "pkg/api/resource",
    "pkg/apis/meta/internalversion",
    "pkg/apis/meta/v1",
    "pkg/apis/meta/v1/unstructured",
    "pkg/apis/meta/v1beta1",
    "pkg/conversion",
    "pkg/conversion/queryparams",
    "pkg/fields",
    "pkg/labels",
    "pkg/runtime",
    "pkg/runtime/schema",
    "pkg/runtime/serializer",
    "pkg/runtime/serializer/json",
    "pkg/runtime/serializer/protobuf",
    "pkg/runtime/serializer/recognizer",
    "pkg/runtime/serializer/streaming",
    "pkg/runtime/serializer/versioning",
    "pkg/selection",
    "pkg/types",
    "pkg/util/cache",
    "pkg/util/clock",
    "pkg/util/diff",
    "pkg/util/errors",
    "pkg/util/framer",
    "pkg/util/intstr",
    "pkg/util/json",
    "pkg/util/mergepatch",
    "pkg/util/net",
    "pkg/util/runtime",
    "pkg/util/sets",
    "pkg/util/sets/types",
    "pkg/util/strategicpatch",
    "pkg/util/validation",
    "pkg/util/validation/field",
    "pkg/util/wait",
    "pkg/util/yaml",
    "pkg/version",
    "pkg/watch",
    "third_party/forked/golang/json",
    "third_party/forked/golang/reflect",
  ]
  pruneopts = "NUT"
  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
  version = "kubernetes-1.11.0"

[[projects]]
  digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
  name = "k8s.io/client-go"
  packages = [
    "discovery",
    "discovery/fake",
    "kubernetes",
    "kubernetes/fake",
    "kubernetes/scheme",
    "kubernetes/typed/admissionregistration/v1alpha1",
    "kubernetes/typed/admissionregistration/v1alpha1/fake",
    "kubernetes/typed/admissionregistration/v1beta1",
    "kubernetes/typed/admissionregistration/v1beta1/fake",
    "kubernetes/typed/apps/v1",
    "kubernetes/typed/apps/v1/fake",
    "kubernetes/typed/apps/v1beta1",
    "kubernetes/typed/apps/v1beta1/fake",
    "kubernetes/typed/apps/v1beta2",
    "kubernetes/typed/apps/v1beta2/fake",
    "kubernetes/typed/authentication/v1",
    "kubernetes/typed/authentication/v1/fake",
    "kubernetes/typed/authentication/v1beta1",
    "kubernetes/typed/authentication/v1beta1/fake",
    "kubernetes/typed/authorization/v1",
    "kubernetes/typed/authorization/v1/fake",
    "kubernetes/typed/authorization/v1beta1",
    "kubernetes/typed/authorization/v1beta1/fake",
    "kubernetes/typed/autoscaling/v1",
    "kubernetes/typed/autoscaling/v1/fake",
    "kubernetes/typed/autoscaling/v2beta1",
    "kubernetes/typed/autoscaling/v2beta1/fake",
    "kubernetes/typed/batch/v1",
    "kubernetes/typed/batch/v1/fake",
    "kubernetes/typed/batch/v1beta1",
    "kubernetes/typed/batch/v1beta1/fake",
    "kubernetes/typed/batch/v2alpha1",
    "kubernetes/typed/batch/v2alpha1/fake",
    "kubernetes/typed/certificates/v1beta1",
    "kubernetes/typed/certificates/v1beta1/fake",
    "kubernetes/typed/core/v1",
    "kubernetes/typed/core/v1/fake",
    "kubernetes/typed/events/v1beta1",
    "kubernetes/typed/events/v1beta1/fake",
    "kubernetes/typed/extensions/v1beta1",
    "kubernetes/typed/extensions/v1beta1/fake",
    "kubernetes/typed/networking/v1",
    "kubernetes/typed/networking/v1/fake",
    "kubernetes/typed/policy/v1beta1",
    "kubernetes/typed/policy/v1beta1/fake",
    "kubernetes/typed/rbac/v1",
    "kubernetes/typed/rbac/v1/fake",
    "kubernetes/typed/rbac/v1alpha1",
    "kubernetes/typed/rbac/v1alpha1/fake",
    "kubernetes/typed/rbac/v1beta1",
    "kubernetes/typed/rbac/v1beta1/fake",
    "kubernetes/typed/scheduling/v1alpha1",
    "kubernetes/typed/scheduling/v1alpha1/fake",
    "kubernetes/typed/scheduling/v1beta1",
    "kubernetes/typed/scheduling/v1beta1/fake",
    "kubernetes/typed/settings/v1alpha1",
    "kubernetes/typed/settings/v1alpha1/fake",
    "kubernetes/typed/storage/v1",
    "kubernetes/typed/storage/v1/fake",
    "kubernetes/typed/storage/v1alpha1",
    "kubernetes/typed/storage/v1alpha1/fake",
    "kubernetes/typed/storage/v1beta1",
    "kubernetes/typed/storage/v1beta1/fake",
    "pkg/apis/clientauthentication",
    "pkg/apis/clientauthentication/v1alpha1",
    "pkg/apis/clientauthentication/v1beta1",
    "pkg/version",
    "plugin/pkg/client/auth/exec",
    "plugin/pkg/client/auth/gcp",
    "rest",
    "rest/watch",
    "testing",
    "third_party/forked/golang/template",
    "tools/auth",
    "tools/cache",
    "tools/clientcmd",
    "tools/clientcmd/api",
    "tools/clientcmd/api/latest",
    "tools/clientcmd/api/v1",
    "tools/metrics",
    "tools/pager",
    "tools/record",
    "tools/reference",
    "transport",
    "util/buffer",
    "util/cert",
    "util/connrotation",
    "util/flowcontrol",
    "util/homedir",
    "util/integer",
    "util/jsonpath",
    "util/retry",
    "util/workqueue",
  ]
  pruneopts = "NUT"
  revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
  version = "kubernetes-1.11.0"

[[projects]]
  digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
  name = "k8s.io/code-generator"
  packages = [
    "cmd/client-gen",
    "cmd/client-gen/args",
    "cmd/client-gen/generators",
    "cmd/client-gen/generators/fake",
    "cmd/client-gen/generators/scheme",
    "cmd/client-gen/generators/util",
    "cmd/client-gen/path",
    "cmd/client-gen/types",
    "cmd/deepcopy-gen",
    "cmd/deepcopy-gen/args",
    "cmd/defaulter-gen",
    "cmd/defaulter-gen/args",
    "cmd/informer-gen",
    "cmd/informer-gen/args",
    "cmd/informer-gen/generators",
    "cmd/lister-gen",
    "cmd/lister-gen/args",
    "cmd/lister-gen/generators",
    "pkg/util",
  ]
  pruneopts = "T"
  revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
  version = "kubernetes-1.11.0"

[[projects]]
  branch = "master"
  digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3"
  name = "k8s.io/gengo"
  packages = [
    "args",
    "examples/deepcopy-gen/generators",
    "examples/defaulter-gen/generators",
    "examples/set-gen/sets",
    "generator",
    "namer",
    "parser",
    "types",
  ]
  pruneopts = "NUT"
  revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991"

[[projects]]
  branch = "master"
  digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
  name = "k8s.io/kube-openapi"
  packages = ["pkg/util/proto"]
  pruneopts = "NUT"
  revision = "e3762e86a74c878ffed47484592986685639c2cd"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/google/go-cmp/cmp",
    "github.com/google/go-cmp/cmp/cmpopts",
    "github.com/istio/glog",
    "github.com/knative/pkg/apis/istio/v1alpha3",
    "github.com/knative/pkg/client/clientset/versioned",
    "github.com/knative/pkg/client/clientset/versioned/fake",
    "github.com/knative/pkg/signals",
    "github.com/prometheus/client_golang/prometheus/promhttp",
    "go.uber.org/zap",
    "go.uber.org/zap/zapcore",
    "k8s.io/api/apps/v1",
    "k8s.io/api/autoscaling/v1",
    "k8s.io/api/autoscaling/v2beta1",
    "k8s.io/api/core/v1",
    "k8s.io/apimachinery/pkg/api/errors",
    "k8s.io/apimachinery/pkg/api/resource",
    "k8s.io/apimachinery/pkg/apis/meta/v1",
    "k8s.io/apimachinery/pkg/labels",
    "k8s.io/apimachinery/pkg/runtime",
    "k8s.io/apimachinery/pkg/runtime/schema",
    "k8s.io/apimachinery/pkg/runtime/serializer",
    "k8s.io/apimachinery/pkg/types",
    "k8s.io/apimachinery/pkg/util/intstr",
    "k8s.io/apimachinery/pkg/util/runtime",
    "k8s.io/apimachinery/pkg/util/sets/types",
    "k8s.io/apimachinery/pkg/util/wait",
    "k8s.io/apimachinery/pkg/watch",
    "k8s.io/client-go/discovery",
    "k8s.io/client-go/discovery/fake",
    "k8s.io/client-go/kubernetes",
    "k8s.io/client-go/kubernetes/fake",
    "k8s.io/client-go/kubernetes/scheme",
    "k8s.io/client-go/kubernetes/typed/core/v1",
    "k8s.io/client-go/plugin/pkg/client/auth/gcp",
    "k8s.io/client-go/rest",
    "k8s.io/client-go/testing",
    "k8s.io/client-go/tools/cache",
    "k8s.io/client-go/tools/clientcmd",
    "k8s.io/client-go/tools/record",
    "k8s.io/client-go/util/flowcontrol",
    "k8s.io/client-go/util/workqueue",
    "k8s.io/code-generator/cmd/client-gen",
    "k8s.io/code-generator/cmd/deepcopy-gen",
    "k8s.io/code-generator/cmd/defaulter-gen",
    "k8s.io/code-generator/cmd/informer-gen",
    "k8s.io/code-generator/cmd/lister-gen",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
```
`Gopkg.toml`
@@ -1,64 +0,0 @@

```toml
required = [
  "k8s.io/apimachinery/pkg/util/sets/types",
  "k8s.io/code-generator/cmd/deepcopy-gen",
  "k8s.io/code-generator/cmd/defaulter-gen",
  "k8s.io/code-generator/cmd/client-gen",
  "k8s.io/code-generator/cmd/lister-gen",
  "k8s.io/code-generator/cmd/informer-gen",
]

[[constraint]]
  name = "go.uber.org/zap"
  version = "v1.9.1"

[[override]]
  name = "gopkg.in/yaml.v2"
  version = "v2.2.1"

[[override]]
  name = "k8s.io/api"
  version = "kubernetes-1.11.0"

[[override]]
  name = "k8s.io/apimachinery"
  version = "kubernetes-1.11.0"

[[override]]
  name = "k8s.io/code-generator"
  version = "kubernetes-1.11.0"

[[override]]
  name = "k8s.io/client-go"
  version = "kubernetes-1.11.0"

[[override]]
  name = "github.com/json-iterator/go"
  # This is the commit at which k8s depends on this in 1.11
  # It seems to be broken at HEAD.
  revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "v0.8.0"

[[constraint]]
  name = "github.com/google/go-cmp"
  version = "v0.2.0"

[[constraint]]
  name = "github.com/knative/pkg"
  revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"

[[override]]
  name = "github.com/golang/glog"
  source = "github.com/istio/glog"

[prune]
  go-tests = true
  unused-packages = true
  non-go = true

  [[prune.project]]
    name = "k8s.io/code-generator"
    unused-packages = false
    non-go = false
```
`LICENSE`
@@ -186,7 +186,7 @@

```diff
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright 2018 Weaveworks. All rights reserved.
+   Copyright 2019 Weaveworks

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
```
85 Makefile
@@ -1,85 +0,0 @@
TAG?=latest
VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)

run:
	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
		-metrics-server=https://prometheus.iowa.weavedx.com \
		-slack-url=https://hooks.slack.com/services/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 \
		-slack-channel="devops-alerts"

build:
	docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile

push:
	docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
	docker push quay.io/stefanprodan/flagger:$(VERSION)

fmt:
	gofmt -l -s -w $(SOURCE_DIRS)

test-fmt:
	gofmt -l -s $(SOURCE_DIRS) | grep ".*\.go"; if [ "$$?" = "0" ]; then exit 1; fi

test-codegen:
	./hack/verify-codegen.sh

test: test-fmt test-codegen
	go test ./...

helm-package:
	cd charts/ && helm package flagger/ && helm package grafana/ && helm package loadtester/
	mv charts/*.tgz docs/
	helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml

helm-up:
	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
	helm upgrade --install flagger-grafana ./charts/grafana --namespace=istio-system

version-set:
	@next="$(TAG)" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and charts"

version-up:
	@next="$(VERSION_MINOR).$$(($(PATCH) + 1))" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and chart"

dev-up: version-up
	@echo "Starting build/push/deploy pipeline for $(VERSION)"
	docker build -t quay.io/stefanprodan/flagger:$(VERSION) . -f Dockerfile
	docker push quay.io/stefanprodan/flagger:$(VERSION)
	kubectl apply -f ./artifacts/flagger/crd.yaml
	helm upgrade -i flagger ./charts/flagger --namespace=istio-system --set crd.create=false

release:
	git tag $(VERSION)
	git push origin $(VERSION)

release-set: fmt version-set helm-package
	git add .
	git commit -m "Release $(VERSION)"
	git push origin master
	git tag $(VERSION)
	git push origin $(VERSION)

reset-test:
	kubectl delete -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/canaries

loadtester-push:
	docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
	docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)
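For reference, typical invocations of the targets in this (now removed) Makefile; a sketch assuming a configured kubeconfig and push access to the quay.io repositories:

```bash
make test                   # gofmt check, codegen verification and go test ./...
TAG=0.4.2 make version-set  # stamp 0.4.2 into code, deployment manifest and charts
make dev-up                 # bump the patch version, then build, push and helm upgrade
```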
450 README.md
@@ -1,450 +1,4 @@
# flagger
# flagger website

[](https://travis-ci.org/stefanprodan/flagger)
[](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[](https://codecov.io/gh/stefanprodan/flagger)
[](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[](https://github.com/stefanprodan/flagger/releases)
[flagger.app](https://flagger.app)

Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.

### Install

Before installing Flagger make sure you have Istio set up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).

Deploy Flagger in the `istio-system` namespace using Helm:

```bash
# add the Helm repository
helm repo add flagger https://flagger.app

# install or upgrade
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set metricsServer=http://prometheus.istio-system:9090
```

Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
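One quick way to confirm a cluster meets these requirements; a sketch assuming `istioctl` is on the PATH:

```bash
kubectl version --short   # the server version should be 1.11.0 or newer
istioctl version          # the control plane should be 1.0.0 or newer
```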
### Usage

Flagger takes a Kubernetes deployment and creates a series of objects
(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.

Gated canary promotion stages:

* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
    * halt advancement if a rolling update is underway
    * halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* check canary HTTP request success rate and latency
    * halt advancement if any metric is under the specified threshold
    * increment the failed checks counter
* check if the number of failed checks reached the threshold
    * route all traffic to primary
    * scale to zero the canary deployment and mark it as failed
    * wait for the canary deployment to be updated (revision bump) and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
    * halt advancement while canary request success rate is under the threshold
    * halt advancement while canary request duration P99 is over the threshold
    * halt advancement if the primary or canary deployment becomes unhealthy
    * halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
    * copy canary deployment spec template over primary
* wait for primary rolling update to finish
    * halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated (revision bump) and start over

You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
    - podinfo.example.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
    - name: istio_requests_total
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 30s
    # external checks (optional)
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
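With the analysis settings above, a healthy rollout advances in maxWeight / stepWeight = 50 / 5 = 10 increments, one per one-minute interval, so the traffic shifting phase alone takes roughly ten minutes before promotion; a failing canary is rolled back after at most 10 failed checks.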
The canary analysis uses the following PromQL queries:

_HTTP requests success rate percentage_

```sql
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"$namespace",
          destination_workload=~"$workload",
          response_code!~"5.*"
        }[$interval]
    )
)
/
sum(
    rate(
        istio_requests_total{
          reporter="destination",
          destination_workload_namespace=~"$namespace",
          destination_workload=~"$workload"
        }[$interval]
    )
)
```

_HTTP requests milliseconds duration P99_

```sql
histogram_quantile(0.99,
  sum(
    irate(
      istio_request_duration_seconds_bucket{
        reporter="destination",
        destination_workload=~"$workload",
        destination_workload_namespace=~"$namespace"
      }[$interval]
    )
  ) by (le)
)
```
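Before relying on a metric in an analysis, you can dry-run the query against the Prometheus HTTP API; a sketch assuming the Istio Prometheus service is named `prometheus` in `istio-system`, with concrete values substituted for the template variables:

```bash
kubectl -n istio-system port-forward svc/prometheus 9090:9090 &

# success rate for the podinfo canary over the last minute
curl -s http://localhost:9090/api/v1/query --data-urlencode \
  'query=sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace=~"test",destination_workload=~"podinfo",response_code!~"5.*"}[1m]))
   / sum(rate(istio_requests_total{reporter="destination",destination_workload_namespace=~"test",destination_workload=~"podinfo"}[1m]))'
```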
The canary analysis can be extended with webhooks.
Flagger will call the webhooks (HTTP POST) and determine from the response status code (HTTP 2xx) whether the canary check has passed or failed.

Webhook payload:

```json
{
    "name": "podinfo",
    "namespace": "test",
    "metadata": {
        "test": "all",
        "token": "16688eb5e9f289f1991c"
    }
}
```
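To verify that a webhook receiver responds the way Flagger expects, you can replay this payload by hand; a sketch assuming a hypothetical receiver listening on `localhost:8888/gate`:

```bash
curl -i -X POST http://localhost:8888/gate \
  -H 'Content-Type: application/json' \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"test": "all", "token": "16688eb5e9f289f1991c"}}'
# any HTTP 2xx response lets the analysis advance; anything else counts as a failed check
```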
### Automated canary analysis, promotions and rollbacks

Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```

Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):

```bash
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
```

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.4.0
```

Flagger detects that the deployment revision changed and starts a new canary analysis:

```
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:         0
  Failed Checks:         0
  Last Transition Time:  2019-01-16T13:47:16Z
  Phase:                 Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME      STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo   Progressing   5        2019-01-16T14:05:07Z
```

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Create a tester pod and exec into it:

```bash
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```

Generate HTTP 500 errors:

```bash
watch curl http://podinfo-canary:9898/status/500
```

Generate latency:

```bash
watch curl http://podinfo-canary:9898/delay/1
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:         0
  Failed Checks:         10
  Last Transition Time:  2019-01-16T13:47:16Z
  Phase:                 Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```

### Monitoring

Flagger comes with a Grafana dashboard made for canary analysis.

Install Grafana with Helm:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus.istio-system:9090
```

The dashboard shows the RED and USE metrics for the primary and canary workloads.

The canary errors and latency spikes have been recorded as Kubernetes events and logged by Flagger in JSON format:

```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```

Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:

```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1

# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1

# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5

# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
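To inspect these metrics directly, port-forward to the controller; a sketch assuming Flagger serves its metrics on the same 8080 port used by its health endpoint:

```bash
kubectl -n istio-system port-forward deploy/flagger 8080:8080 &
curl -s http://localhost:8080/metrics | grep ^flagger_
```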
### Alerting

Flagger can be configured to send Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```

Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected and if the canary analysis failed or succeeded.

A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis
reaches the maximum number of failed checks.

Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment failed:

```yaml
- alert: canary_rollback
  expr: flagger_canary_status > 1
  for: 1m
  labels:
    severity: warning
  annotations:
    summary: "Canary failed"
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
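Since `flagger_canary_status` is 0 while a rollout is running, 1 on success and 2 on failure, the `> 1` expression matches only canaries that ended in the failed state; the alert should keep firing until a new rollout moves the gauge back to 0.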
### Roadmap

* Extend the validation mechanism to support metrics other than HTTP success rate and latency
* Add support for comparing the canary metrics to the primary ones and doing the validation based on the deviation between the two
* Extend the canary analysis and promotion to workloads other than Kubernetes deployments, such as Flux Helm releases or OpenFaaS functions

### Contributing

Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.

When submitting bug reports please include as many details as possible:

* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)
@@ -1,58 +0,0 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
    - app.iowa.weavedx.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
    - name: istio_requests_total
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
    - name: istio_request_duration_seconds_bucket
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 30s
    # external checks (optional)
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.3.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: blue
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          initialDelaySeconds: 5
          timeoutSeconds: 5
        resources:
          limits:
            cpu: 2000m
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 64Mi
@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
@@ -1,35 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flagger
  namespace: istio-system
  labels:
    app: flagger
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: flagger
  labels:
    app: flagger
rules:
- apiGroups: ['*']
  resources: ['*']
  verbs: ['*']
- nonResourceURLs: ['*']
  verbs: ['*']
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: flagger
  labels:
    app: flagger
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flagger
subjects:
- kind: ServiceAccount
  name: flagger
  namespace: istio-system
@@ -1,115 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: canaries.flagger.app
spec:
  group: flagger.app
  version: v1alpha3
  versions:
  - name: v1alpha3
    served: true
    storage: true
  - name: v1alpha2
    served: true
    storage: false
  - name: v1alpha1
    served: true
    storage: false
  names:
    plural: canaries
    singular: canary
    kind: Canary
    categories:
    - all
  scope: Namespaced
  subresources:
    status: {}
  additionalPrinterColumns:
  - name: Status
    type: string
    JSONPath: .status.phase
  - name: Weight
    type: string
    JSONPath: .status.canaryWeight
  - name: LastTransitionTime
    type: string
    JSONPath: .status.lastTransitionTime
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required:
          - targetRef
          - service
          - canaryAnalysis
          properties:
            progressDeadlineSeconds:
              type: number
            targetRef:
              type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            autoscalerRef:
              anyOf:
              - type: string
              - type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            service:
              type: object
              required: ['port']
              properties:
                port:
                  type: number
            canaryAnalysis:
              properties:
                interval:
                  type: string
                  pattern: "^[0-9]+(m|s)"
                threshold:
                  type: number
                maxWeight:
                  type: number
                stepWeight:
                  type: number
                metrics:
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'interval', 'threshold']
                      properties:
                        name:
                          type: string
                        interval:
                          type: string
                          pattern: "^[0-9]+(m|s)"
                        threshold:
                          type: number
                webhooks:
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'url', 'timeout']
                      properties:
                        name:
                          type: string
                        url:
                          type: string
                          format: url
                        timeout:
                          type: string
                          pattern: "^[0-9]+(m|s)"
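With this schema installed, the API server should reject a Canary whose fields fail validation; a hedged sketch, assuming a manifest whose `canaryAnalysis.interval` is set to `1h`, which the `^[0-9]+(m|s)` pattern does not allow:

```bash
kubectl apply -f canary.yaml
# expected: the apply fails with a validation error along the lines of
# spec.canaryAnalysis.interval in body should match '^[0-9]+(m|s)'
```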
@@ -1,64 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
  namespace: istio-system
  labels:
    app: flagger
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: flagger
  template:
    metadata:
      labels:
        app: flagger
      annotations:
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: flagger
      containers:
      - name: flagger
        image: quay.io/stefanprodan/flagger:0.4.1
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 8080
        command:
        - ./flagger
        - -log-level=info
        - -control-loop-interval=10s
        - -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
        livenessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=2
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=2
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        resources:
          limits:
            memory: "512Mi"
            cpu: "1000m"
          requests:
            memory: "32Mi"
            cpu: "10m"
        securityContext:
          readOnlyRootFilesystem: true
          runAsUser: 10001
@@ -1,60 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger-loadtester
  labels:
    app: flagger-loadtester
spec:
  selector:
    matchLabels:
      app: flagger-loadtester
  template:
    metadata:
      labels:
        app: flagger-loadtester
      annotations:
        prometheus.io/scrape: "true"
    spec:
      containers:
      - name: loadtester
        image: quay.io/stefanprodan/flagger-loadtester:0.1.0
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 8080
        command:
        - ./loadtester
        - -port=8080
        - -log-level=info
        - -timeout=1h
        - -log-cmd-output=true
        livenessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=4
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=4
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        resources:
          limits:
            memory: "512Mi"
            cpu: "1000m"
          requests:
            memory: "32Mi"
            cpu: "10m"
        securityContext:
          readOnlyRootFilesystem: true
          runAsUser: 10001
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: flagger-loadtester
  labels:
    app: flagger-loadtester
spec:
  type: ClusterIP
  selector:
    app: flagger-loadtester
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
@@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: test
  labels:
    istio-injection: enabled
@@ -1,34 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
spec:
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - podinfo.iowa.weavedx.com
  - podinfo
  http:
  - match:
    - headers:
        user-agent:
          regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
    route:
    - destination:
        host: podinfo-primary
        port:
          number: 9898
      weight: 0
    - destination:
        host: podinfo
        port:
          number: 9898
      weight: 100
  - route:
    - destination:
        host: podinfo-primary
        port:
          number: 9898
      weight: 100
@@ -1,25 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - podinfo.iowa.weavedx.com
  - podinfo
  http:
  - route:
    - destination:
        host: podinfo-primary
        port:
          number: 9898
      weight: 100
    mirror:
      host: podinfo
      port:
        number: 9898
@@ -1,26 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - podinfo.iowa.weavedx.com
  - podinfo
  http:
  - route:
    - destination:
        host: podinfo-primary
        port:
          number: 9898
      weight: 100
    - destination:
        host: podinfo
        port:
          number: 9898
      weight: 0
@@ -1,75 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.2.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: green
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 2
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          failureThreshold: 3
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 2
        resources:
          limits:
            cpu: 1000m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 16Mi
        volumeMounts:
        - mountPath: /data
          name: data
      volumes:
      - emptyDir: {}
        name: data
@@ -1,23 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 3
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
  - type: Resource
    resource:
      name: memory
      targetAverageValue: 200Mi
@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  type: ClusterIP
  selector:
    app: podinfo
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: http
@@ -1,71 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo-primary
  namespace: test
  labels:
    app: podinfo-primary
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo-primary
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo-primary
    spec:
      containers:
      - name: podinfod
        image: quay.io/stefanprodan/podinfo:1.1.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
          name: http
          protocol: TCP
        command:
        - ./podinfo
        - --port=9898
        - --level=info
        - --random-delay=false
        - --random-error=false
        env:
        - name: PODINFO_UI_COLOR
          value: blue
        livenessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/healthz
          initialDelaySeconds: 5
          failureThreshold: 3
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          exec:
            command:
            - podcli
            - check
            - http
            - localhost:9898/readyz
          initialDelaySeconds: 5
          failureThreshold: 3
          periodSeconds: 3
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          limits:
            cpu: 2000m
            memory: 512Mi
          requests:
            cpu: 10m
            memory: 64Mi
@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo-primary
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo-primary
  minReplicas: 2
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: cpu
      # scale up if usage is above
      # 99% of the requested CPU (100m)
      targetAverageUtilization: 99
@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: podinfo-primary
  namespace: test
  labels:
    app: podinfo-primary
spec:
  type: ClusterIP
  selector:
    app: podinfo-primary
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: http
@@ -1,30 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  gateways:
  - public-gateway.istio-system.svc.cluster.local
  - mesh
  hosts:
  - podinfo.istio.weavedx.com
  - podinfo
  http:
  - route:
    - destination:
        host: podinfo-primary
        port:
          number: 9898
      weight: 100
    - destination:
        host: podinfo
        port:
          number: 9898
      weight: 0
    timeout: 10s
    retries:
      attempts: 3
      perTryTimeout: 2s
@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -1,19 +0,0 @@
apiVersion: v1
name: flagger
version: 0.4.1
appVersion: 0.4.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops
@@ -1,84 +0,0 @@
# Flagger

[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPI analysis, a canary is promoted or aborted and the analysis result is published to Slack.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

## Installing the Chart

Add the Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger`:

```console
$ helm install --name flagger --namespace istio-system flagger/flagger
```

The command deploys Flagger on the Kubernetes cluster in the istio-system namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger` deployment:

```console
$ helm delete --purge flagger
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Flagger chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`crd.create` | if `true`, create Flagger's CRDs | `true`
`resources.requests/cpu` | pod CPU request | `10m`
`resources.requests/memory` | pod memory request | `32Mi`
`resources.limits/cpu` | pod CPU limit | `1000m`
`resources.limits/memory` | pod memory limit | `512Mi`
`affinity` | node/pod affinities | None
`nodeSelector` | node labels for pod assignment | `{}`
`tolerations` | list of node taints to tolerate | `[]`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace istio-system \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm upgrade -i flagger flagger/flagger \
  --namespace istio-system \
  -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
@@ -1 +0,0 @@
Flagger installed
@@ -1,42 +0,0 @@
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
{{- define "flagger.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flagger.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "flagger.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
@@ -1,11 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "flagger.serviceAccountName" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -1,117 +0,0 @@
{{- if .Values.crd.create }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: canaries.flagger.app
spec:
  group: flagger.app
  version: v1alpha3
  versions:
  - name: v1alpha3
    served: true
    storage: true
  - name: v1alpha2
    served: true
    storage: false
  - name: v1alpha1
    served: true
    storage: false
  names:
    plural: canaries
    singular: canary
    kind: Canary
    categories:
    - all
  scope: Namespaced
  subresources:
    status: {}
  additionalPrinterColumns:
  - name: Status
    type: string
    JSONPath: .status.phase
  - name: Weight
    type: string
    JSONPath: .status.canaryWeight
  - name: LastTransitionTime
    type: string
    JSONPath: .status.lastTransitionTime
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required:
          - targetRef
          - service
          - canaryAnalysis
          properties:
            progressDeadlineSeconds:
              type: number
            targetRef:
              type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            autoscalerRef:
              anyOf:
              - type: string
              - type: object
              required: ['apiVersion', 'kind', 'name']
              properties:
                apiVersion:
                  type: string
                kind:
                  type: string
                name:
                  type: string
            service:
              type: object
              required: ['port']
              properties:
                port:
                  type: number
            canaryAnalysis:
              properties:
                interval:
                  type: string
                  pattern: "^[0-9]+(m|s)"
                threshold:
                  type: number
                maxWeight:
                  type: number
                stepWeight:
                  type: number
                metrics:
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'interval', 'threshold']
                      properties:
                        name:
                          type: string
                        interval:
                          type: string
                          pattern: "^[0-9]+(m|s)"
                        threshold:
                          type: number
                webhooks:
                  type: array
                  properties:
                    items:
                      type: object
                      required: ['name', 'url', 'timeout']
                      properties:
                        name:
                          type: string
                        url:
                          type: string
                          format: url
                        timeout:
                          type: string
                          pattern: "^[0-9]+(m|s)"
{{- end }}
@@ -1,77 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "flagger.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}
      containers:
      - name: flagger
        securityContext:
          readOnlyRootFilesystem: true
          runAsUser: 10001
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - name: http
          containerPort: 8080
        command:
        - ./flagger
        - -log-level=info
        - -metrics-server={{ .Values.metricsServer }}
        {{- if .Values.slack.url }}
        - -slack-url={{ .Values.slack.url }}
        - -slack-user={{ .Values.slack.user }}
        - -slack-channel={{ .Values.slack.channel }}
        {{- end }}
        livenessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=4
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - wget
            - --quiet
            - --tries=1
            - --timeout=4
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
@@ -1,35 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: {{ template "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ['*']
  resources: ['*']
  verbs: ['*']
- nonResourceURLs: ['*']
  verbs: ['*']
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: {{ template "flagger.fullname" . }}
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "flagger.fullname" . }}
subjects:
- name: {{ template "flagger.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  kind: ServiceAccount
{{- end }}
@@ -1,45 +0,0 @@
# Default values for flagger.

image:
  repository: quay.io/stefanprodan/flagger
  tag: 0.4.1
  pullPolicy: IfNotPresent

metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"

slack:
  user: flagger
  channel:
  # incoming webhook https://api.slack.com/incoming-webhooks
  url:

serviceAccount:
  # serviceAccount.create: Whether to create a service account or not
  create: true
  # serviceAccount.name: The name of the service account to create or use
  name: ""

rbac:
  # rbac.create: `true` if rbac resources should be created
  create: true

crd:
  # crd.create: `true` if custom resource definitions should be created
  create: true

nameOverride: ""
fullnameOverride: ""

resources:
  limits:
    memory: "512Mi"
    cpu: "1000m"
  requests:
    memory: "32Mi"
    cpu: "10m"

nodeSelector: {}

tolerations: []

affinity: {}
@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -1,13 +0,0 @@
apiVersion: v1
name: grafana
version: 0.1.0
appVersion: 5.4.2
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
@@ -1,79 +0,0 @@
# Flagger Grafana

Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

## Installing the Chart

Add the Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-grafana`:

```console
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus:9090 \
  --set user=admin \
  --set password=admin
```

The command deploys Grafana on the Kubernetes cluster in the default namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-grafana` deployment:

```console
helm delete --purge flagger-grafana
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Grafana chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources` | pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `admin`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install flagger/grafana --name flagger-grafana \
  --set token=WEAVE-CLOUD-TOKEN
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install flagger/grafana --name flagger-grafana -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
@@ -1,15 +0,0 @@
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of it by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,6 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-dashboards
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
@@ -1,32 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-datasources
data:
  datasources.yaml: |-
    apiVersion: 1

    deleteDatasources:
      - name: prometheus
    {{- if .Values.token }}
    datasources:
      - name: prometheus
        type: prometheus
        access: proxy
        url: https://cloud.weave.works/api/prom
        isDefault: true
        editable: true
        version: 1
        basicAuth: true
        basicAuthUser: weave
        basicAuthPassword: {{ .Values.token }}
    {{- else }}
    datasources:
      - name: prometheus
        type: prometheus
        access: proxy
        url: {{ .Values.url }}
        isDefault: true
        editable: true
        version: 1
    {{- end }}
@@ -1,81 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: {{ template "grafana.fullname" . }}
  labels:
    app: {{ template "grafana.fullname" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ template "grafana.fullname" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ template "grafana.fullname" . }}
        release: {{ .Release.Name }}
      annotations:
        prometheus.io/scrape: 'false'
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          # livenessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /
          #     port: http
          env:
            - name: GF_SECURITY_ADMIN_USER
              value: {{ .Values.user }}
            - name: GF_SECURITY_ADMIN_PASSWORD
              value: {{ .Values.password }}
            - name: GF_PATHS_PROVISIONING
              value: /etc/grafana/provisioning/
          volumeMounts:
            - name: grafana
              mountPath: /var/lib/grafana
            - name: dashboards
              mountPath: /etc/grafana/dashboards
            - name: datasources
              mountPath: /etc/grafana/provisioning/datasources
            - name: providers
              mountPath: /etc/grafana/provisioning/dashboards
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
      volumes:
        - name: grafana
          emptyDir: {}
        - name: dashboards
          configMap:
            name: {{ template "grafana.fullname" . }}-dashboards
        - name: providers
          configMap:
            name: {{ template "grafana.fullname" . }}-providers
        - name: datasources
          configMap:
            name: {{ template "grafana.fullname" . }}-datasources
@@ -1,17 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "grafana.fullname" . }}-providers
data:
  providers.yaml: |+
    apiVersion: 1

    providers:
      - name: 'default'
        orgId: 1
        folder: ''
        type: file
        disableDeletion: false
        editable: true
        options:
          path: /etc/grafana/dashboards
@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "grafana.fullname" . }}
  labels:
    app: {{ template "grafana.name" . }}
    chart: {{ template "grafana.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ template "grafana.fullname" . }}
    release: {{ .Release.Name }}
@@ -1,37 +0,0 @@
# Default values for grafana.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: grafana/grafana
  tag: 5.4.2
  pullPolicy: IfNotPresent

service:
  type: ClusterIP
  port: 80

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

user: admin
password: admin

# Istio Prometheus instance
url: http://prometheus:9090

# Weave Cloud instance token
token:
@@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,20 +0,0 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing services based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
  - https://github.com/stefanprodan/flagger
maintainers:
  - name: stefanprodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - canary
  - istio
  - gitops
  - load testing
@@ -1,78 +0,0 @@
# Flagger load testing service

[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0

## Installing the Chart

Add Flagger Helm repository:

```console
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-loadtester`:

```console
helm upgrade -i flagger-loadtester flagger/loadtester
```

The command deploys the load tester on the Kubernetes cluster in the default namespace.

> **Tip**: Note that the namespace where you deploy the load tester should have the Istio sidecar injection enabled.
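
If injection is not enabled yet, you can label the target namespace before installing; a sketch, assuming the chart is deployed to a namespace named `test`:

```console
kubectl label namespace test istio-injection=enabled
```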

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `flagger-loadtester` deployment:

```console
helm delete --purge flagger-loadtester
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the load tester chart and their default values.

Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | Desired number of pods | `1`
`resources.requests.cpu` | CPU requests | `10m`
`resources.requests.memory` | Memory requests | `64Mi`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | Node/pod affinities | `{}`
`nodeSelector` | Node labels for pod assignment | `{}`
`service.type` | Type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:

```console
helm install flagger/loadtester --name flagger-loadtester \
--set cmd.logOutput=false
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example:

```console
helm install flagger/loadtester --name flagger-loadtester -f values.yaml
```

> **Tip**: You can use the default [values.yaml](values.yaml)
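
A minimal override file might look like the following sketch; the values are illustrative:

```yaml
# example values.yaml overrides for the load tester chart (illustrative values)
logLevel: debug
cmd:
  logOutput: false
  timeout: 30m
```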
@@ -1 +0,0 @@
Flagger's load testing service is available at http://{{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}/
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "loadtester.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "loadtester.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "loadtester.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "loadtester.name" . }}
  template:
    metadata:
      labels:
        app: {{ include "loadtester.name" . }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
          command:
            - ./loadtester
            - -port=8080
            - -log-level={{ .Values.logLevel }}
            - -timeout={{ .Values.cmd.timeout }}
            - -log-cmd-output={{ .Values.cmd.logOutput }}
          livenessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - wget
                - --quiet
                - --tries=1
                - --timeout=4
                - --spider
                - http://localhost:8080/healthz
            timeoutSeconds: 5
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "loadtester.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "loadtester.name" . }}
    helm.sh/chart: {{ include "loadtester.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: {{ include "loadtester.name" . }}
@@ -1,29 +0,0 @@
replicaCount: 1

image:
  repository: quay.io/stefanprodan/flagger-loadtester
  tag: 0.1.0
  pullPolicy: IfNotPresent

logLevel: info
cmd:
  logOutput: true
  timeout: 1h

nameOverride: ""
fullnameOverride: ""

service:
  type: ClusterIP
  port: 80

resources:
  requests:
    cpu: 10m
    memory: 64Mi

nodeSelector: {}

tolerations: []

affinity: {}
@@ -1,141 +0,0 @@
package main

import (
	"flag"
	"log"
	"time"

	_ "github.com/istio/glog"
	istioclientset "github.com/knative/pkg/client/clientset/versioned"
	"github.com/knative/pkg/signals"
	clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
	informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
	"github.com/stefanprodan/flagger/pkg/controller"
	"github.com/stefanprodan/flagger/pkg/logging"
	"github.com/stefanprodan/flagger/pkg/notifier"
	"github.com/stefanprodan/flagger/pkg/server"
	"github.com/stefanprodan/flagger/pkg/version"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

var (
	masterURL           string
	kubeconfig          string
	metricsServer       string
	controlLoopInterval time.Duration
	logLevel            string
	port                string
	slackURL            string
	slackUser           string
	slackChannel        string
)

func init() {
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
	flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
	flag.StringVar(&port, "port", "8080", "Port to listen on.")
	flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
	flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
	flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
}

func main() {
	flag.Parse()

	logger, err := logging.NewLogger(logLevel)
	if err != nil {
		log.Fatalf("Error creating logger: %v", err)
	}
	defer logger.Sync()

	stopCh := signals.SetupSignalHandler()

	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
	if err != nil {
		logger.Fatalf("Error building kubeconfig: %v", err)
	}

	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		logger.Fatalf("Error building kubernetes clientset: %v", err)
	}

	istioClient, err := istioclientset.NewForConfig(cfg)
	if err != nil {
		logger.Fatalf("Error building istio clientset: %v", err)
	}

	flaggerClient, err := clientset.NewForConfig(cfg)
	if err != nil {
		logger.Fatalf("Error building flagger clientset: %s", err.Error())
	}

	flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
	canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

	logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)

	ver, err := kubeClient.Discovery().ServerVersion()
	if err != nil {
		logger.Fatalf("Error calling Kubernetes API: %v", err)
	}

	logger.Infof("Connected to Kubernetes API %s", ver)

	ok, err := controller.CheckMetricsServer(metricsServer)
	if ok {
		logger.Infof("Connected to metrics server %s", metricsServer)
	} else {
		logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
	}

	var slack *notifier.Slack
	if slackURL != "" {
		slack, err = notifier.NewSlack(slackURL, slackUser, slackChannel)
		if err != nil {
			logger.Errorf("Notifier %v", err)
		} else {
			logger.Infof("Slack notifications enabled for channel %s", slack.Channel)
		}
	}

	// start HTTP server
	go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

	c := controller.NewController(
		kubeClient,
		istioClient,
		flaggerClient,
		canaryInformer,
		controlLoopInterval,
		metricsServer,
		logger,
		slack,
	)

	flaggerInformerFactory.Start(stopCh)

	logger.Info("Waiting for informer caches to sync")
	for _, synced := range []cache.InformerSynced{
		canaryInformer.Informer().HasSynced,
	} {
		if ok := cache.WaitForCacheSync(stopCh, synced); !ok {
			logger.Fatalf("Failed to wait for cache sync")
		}
	}

	// start controller
	go func(ctrl *controller.Controller) {
		if err := ctrl.Run(2, stopCh); err != nil {
			logger.Fatalf("Error running controller: %v", err)
		}
	}(c)

	<-stopCh
}
@@ -1,44 +0,0 @@
package main

import (
	"flag"
	"log"
	"time"

	"github.com/knative/pkg/signals"
	"github.com/stefanprodan/flagger/pkg/loadtester"
	"github.com/stefanprodan/flagger/pkg/logging"
)

var VERSION = "0.1.0"
var (
	logLevel     string
	port         string
	timeout      time.Duration
	logCmdOutput bool
)

func init() {
	flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
	flag.StringVar(&port, "port", "9090", "Port to listen on.")
	flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
	flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
}

func main() {
	flag.Parse()

	logger, err := logging.NewLogger(logLevel)
	if err != nil {
		log.Fatalf("Error creating logger: %v", err)
	}
	defer logger.Sync()

	stopCh := signals.SetupSignalHandler()

	taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)

	go taskRunner.Start(100*time.Millisecond, stopCh)

	logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
	loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
}
@@ -1,73 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting stefan.prodan(at)gmail.com.
All complaints will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of
an incident. Further details of specific enforcement policies may be
posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
docs/.vuepress/config.js (new file)
@@ -0,0 +1,23 @@
module.exports = {
  title: 'Flagger',
  description: 'Progressive Delivery operator for Kubernetes (Canary, A/B Testing and Blue/Green deployments)',
  themeConfig: {
    search: true,
    activeHeaderLinks: false,
    docsDir: 'docs',
    repo: 'fluxcd/flagger',
    nav: [
      { text: 'Docs', link: 'https://docs.flagger.app' },
      { text: 'Changelog', link: 'https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md' }
    ]
  },
  head: [
    ['link', { rel: 'icon', href: '/favicon.png' }],
    ['link', { rel: 'stylesheet', href: '/website.css' }],
    ['meta', { name: 'keywords', content: 'gitops kubernetes flagger flux istio linkerd appmesh contour gloo nginx skipper traefik' }],
    ['meta', { name: 'twitter:card', content: 'summary_large_image' }],
    ['meta', { name: 'twitter:title', content: 'Flagger' }],
    ['meta', { name: 'twitter:description', content: 'Progressive delivery Kubernetes operator (Canary, A/B Testing and Blue/Green deployments)' }],
    ['meta', { name: 'twitter:image:src', content: 'https://flagger.app/flagger-overview.png' }]
  ]
};
docs/.vuepress/public/cncf.png (new binary file, 24 KiB)
docs/.vuepress/public/favicon.png (new binary file, 7.5 KiB)
docs/.vuepress/public/flagger-gitops.png (new binary file, 188 KiB)
docs/.vuepress/public/flagger-overview.png (new binary file, 245 KiB)
docs/.vuepress/public/website.css (new file)
@@ -0,0 +1,12 @@
.icon.outbound {
  display: none !important;
}
.site-name {
  padding-left: 30px;
  position: relative;
  background: url(favicon.png) left 50% no-repeat;
  background-size: 20px 20px;
}
/*.theme-container .theme-default-content:not(.custom) {*/
/*  max-width: 920px;*/
/*}*/
docs/README.md (new file)
@@ -0,0 +1,100 @@
---
title: Flagger
home: true
heroText: Flagger
tagline: Progressive Delivery Operator for Kubernetes
actionText: Get Started →
actionLink: https://docs.flagger.app
features:
- title: Safer Releases
  details: Reduce the risk of introducing a new software version in production by gradually shifting traffic to the new version while measuring metrics like HTTP/gRPC request success rate and latency.
- title: Flexible Traffic Routing
  details: Shift and route traffic between app versions automatically using an ingress controller or a service mesh compatible with Kubernetes Gateway API.
- title: Extensible Validation
  details: Besides the builtin metrics checks, you can extend your application analysis with custom metrics and webhooks for running acceptance tests, load tests, or any other custom validation.
footer: Apache License 2.0 | Copyright © 2018-2025 The Flux authors
---

## Progressive Delivery

Flagger was designed to give developers confidence in automating production releases with progressive delivery techniques.

::: tip Canary release

A benefit of using canary releases is the ability to do capacity testing of the new version in a production environment
with a safe rollback strategy if issues are found. By slowly ramping up the load, you can monitor and capture metrics
about how the new version impacts the production environment.

[Martin Fowler](https://martinfowler.com/bliki/CanaryRelease.html)
:::

Flagger can run automated application analysis, testing, promotion and rollback for the following deployment strategies:
* **Canary** (progressive traffic shifting with session affinity)
  * [Istio](https://docs.flagger.app/tutorials/istio-progressive-delivery),
    [Linkerd](https://docs.flagger.app/tutorials/linkerd-progressive-delivery),
    [Kuma Service Mesh](https://docs.flagger.app/tutorials/kuma-progressive-delivery),
    [Gateway API](https://docs.flagger.app/tutorials/gatewayapi-progressive-delivery)
  * [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery),
    [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery),
    [NGINX](https://docs.flagger.app/tutorials/nginx-progressive-delivery),
    [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery),
    [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery),
    [Apache APISIX](https://docs.flagger.app/tutorials/apisix-progressive-delivery),
    [Knative](https://docs.flagger.app/tutorials/knative-progressive-delivery)
* **A/B Testing** (HTTP headers and cookies traffic routing)
  * [Istio](https://docs.flagger.app/tutorials/istio-ab-testing),
    [Gateway API](https://docs.flagger.app/tutorials/gatewayapi-progressive-delivery#a-b-testing),
    [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery#a-b-testing),
    [NGINX](https://docs.flagger.app/tutorials/nginx-progressive-delivery#a-b-testing)
* **Blue/Green** (traffic switching and mirroring)
  * [Kubernetes CNI](https://docs.flagger.app/tutorials/kubernetes-blue-green),
    [Istio](https://docs.flagger.app/tutorials/istio-progressive-delivery#traffic-mirroring),
    Linkerd, Kuma, Contour, Gloo, NGINX, Skipper, Traefik, Apache APISIX

Flagger's application analysis can be extended with metric queries targeting Prometheus, Datadog,
CloudWatch, New Relic, Graphite, Dynatrace, InfluxDB and Google Cloud Monitoring.

Flagger can be configured to [send notifications](https://docs.flagger.app/usage/alerting) to
Slack, Microsoft Teams, Discord and Rocket.
It will post messages when a deployment has been initialised,
when a new revision has been detected and if the canary analysis failed or succeeded.

## GitOps



You can build fully automated GitOps pipelines for canary deployments with Flagger and
[Flux](https://github.com/fluxcd/flux2).

::: tip GitOps

GitOps is a way to do Kubernetes cluster management and application delivery.
It works by using Git as a single source of truth for declarative infrastructure and applications.
With Git at the center of your delivery pipelines, developers can make pull requests
to accelerate and simplify application deployments and operations tasks to Kubernetes.

:::

## Getting Help

If you have any questions about Flagger and progressive delivery:

* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [CNCF community slack](https://slack.cncf.io/)
  and join the [#flagger](https://cloud-native.slack.com/messages/flagger/) channel.
* Check out the [Flux talks section](https://fluxcd.io/community/#talks) to see a list of online talks,
  hands-on training and meetups.
* File an [issue](https://github.com/fluxcd/flagger/issues/new).

Your feedback is always welcome!

## License

Flagger is [Apache 2.0](https://raw.githubusercontent.com/fluxcd/flagger/main/LICENSE)
licensed and accepts contributions via GitHub pull requests.

Flagger was initially developed in 2018 at Weaveworks by Stefan Prodan.
In 2020 Flagger became a [Cloud Native Computing Foundation](https://cncf.io/) project,
part of the [Flux](https://fluxcd.io) family of GitOps tools.

[![CNCF](/cncf.png)](https://cncf.io/)
(five binary image files deleted from docs/diagrams)
@@ -1,23 +0,0 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
---

# Introduction

[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on the **KPIs** analysis a canary is promoted or aborted and the analysis result is published to **Slack**.



Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.

This project is sponsored by [Weaveworks](https://www.weave.works/)
@@ -1,17 +0,0 @@
# Table of contents

* [Introduction](README.md)
* [How it works](how-it-works.md)

## Install

* [Install Flagger](install/install-flagger.md)
* [Install Grafana](install/install-grafana.md)
* [Install Istio](install/install-istio.md)

## Usage

* [Canary Deployments](usage/progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)
@@ -1,361 +0,0 @@
# How it works

[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.



### Canary Custom Resource

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
      - podinfo.example.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 5
    # Istio Prometheus checks
    metrics:
      - name: istio_requests_total
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        threshold: 99
        interval: 1m
      - name: istio_request_duration_seconds_bucket
        # maximum req duration P99
        # milliseconds
        threshold: 500
        interval: 30s
    # external checks (optional)
    webhooks:
      - name: integration-tests
        url: http://podinfo.test:9898/echo
        timeout: 1m
        # key-value pairs (optional)
        metadata:
          test: "all"
          token: "16688eb5e9f289f1991c"
```

**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
spec:
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      labels:
        app: podinfo
```

The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
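
For the `podinfo` example above, that means the pod template declares a matching port; a minimal sketch:

```yaml
# pod template snippet: containerPort must match the canary service.port (9898 above)
containers:
  - name: podinfo
    ports:
      - containerPort: 9898
```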

### Canary Deployment



Gated canary promotion stages:

* scan for canary deployments
* creates the primary deployment if needed
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
  * halt advancement if a rolling update is underway
  * halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% \(step weight\)
* check canary HTTP request success rate and latency
  * halt advancement if any metric is under the specified threshold
  * increment the failed checks counter
* check if the number of failed checks reached the threshold
  * route all traffic to primary
  * scale to zero the canary deployment and mark it as failed
  * wait for the canary deployment to be updated \(revision bump\) and start over
* increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* halt advancement if any of the webhook calls are failing
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* promote canary to primary
  * copy canary deployment spec template over primary
* wait for primary rolling update to finish
  * halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark the canary deployment as finished
* wait for the canary deployment to be updated \(revision bump\) and start over

You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.

### Canary Analysis

The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.

Spec:

```yaml
canaryAnalysis:
  # schedule interval (default 60s)
  interval: 1m
  # max number of failed metric checks before rollback
  threshold: 10
  # max traffic percentage routed to canary
  # percentage (0-100)
  maxWeight: 50
  # canary increment step
  # percentage (0-100)
  stepWeight: 2
```

The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:

```
interval * (maxWeight / stepWeight)
```

And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:

```
interval * threshold
```
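
For the spec above \(`interval: 1m`, `maxWeight: 50`, `stepWeight: 2`, `threshold: 10`\) that works out to `1m * (50 / 2) = 25` minutes for a successful promotion, and `1m * 10 = 10` minutes for a consistently failing canary to be rolled back.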

### HTTP Metrics

The canary analysis uses the following Prometheus queries:

**HTTP requests success rate percentage**

Spec:

```yaml
canaryAnalysis:
  metrics:
    - name: istio_requests_total
      # minimum req success rate (non 5xx responses)
      # percentage (0-100)
      threshold: 99
      interval: 1m
```

Query:

```javascript
sum(
    rate(
        istio_requests_total{
            reporter="destination",
            destination_workload_namespace=~"$namespace",
            destination_workload=~"$workload",
            response_code!~"5.*"
        }[$interval]
    )
)
/
sum(
    rate(
        istio_requests_total{
            reporter="destination",
            destination_workload_namespace=~"$namespace",
            destination_workload=~"$workload"
        }[$interval]
    )
)
```

**HTTP requests milliseconds duration P99**

Spec:

```yaml
canaryAnalysis:
  metrics:
    - name: istio_request_duration_seconds_bucket
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 1m
```

Query:

```javascript
histogram_quantile(0.99,
    sum(
        irate(
            istio_request_duration_seconds_bucket{
                reporter="destination",
                destination_workload=~"$workload",
                destination_workload_namespace=~"$namespace"
            }[$interval]
        )
    ) by (le)
)
```

> **Note** that the metric interval should be lower than or equal to the control loop interval.

### Webhooks

The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.

Spec:

```yaml
canaryAnalysis:
  webhooks:
    - name: integration-tests
      url: http://podinfo.test:9898/echo
      timeout: 30s
      metadata:
        test: "all"
        token: "16688eb5e9f289f1991c"
    - name: load-tests
      url: http://podinfo.test:9898/echo
      timeout: 30s
      metadata:
        key1: "val1"
        key2: "val2"
```

> **Note** that the sum of all webhook timeouts should be lower than the control loop interval.

Webhook payload (HTTP POST):

```json
{
    "name": "podinfo",
    "namespace": "test",
    "metadata": {
        "test": "all",
        "token": "16688eb5e9f289f1991c"
    }
}
```

Response status codes:

* 200-202 - advance canary by increasing the traffic weight
* timeout or non-2xx - halt advancement and increment failed checks

On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
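
To make the contract concrete, here is a minimal sketch of a custom webhook receiver in Go; the `/gate` route and the metadata check are illustrative assumptions, not part of Flagger:

```go
// Sketch of a custom webhook receiver (illustrative, not part of Flagger).
// It accepts the JSON payload shown above and gates the canary via the status code.
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

type canaryWebhook struct {
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	Metadata  map[string]string `json:"metadata"`
}

func main() {
	// "/gate" is a hypothetical route; Flagger simply POSTs to the configured URL.
	http.HandleFunc("/gate", func(w http.ResponseWriter, r *http.Request) {
		var hook canaryWebhook
		if err := json.NewDecoder(r.Body).Decode(&hook); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Run your checks here: any 2xx response advances the canary,
		// anything else halts it and increments the failed checks counter.
		if hook.Metadata["test"] == "all" {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "checks failed", http.StatusInternalServerError)
	})
	log.Fatal(http.ListenAndServe(":9898", nil))
}
```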

### Load Testing

For workloads that are not receiving constant traffic, Flagger can be configured with a webhook that,
when called, will start a load test for the target workload.
If the target workload doesn't receive any traffic during the canary analysis,
Flagger metric checks will fail with "no values found for metric istio_requests_total".

Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.



First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```

Or by using Helm:

```bash
helm repo add flagger https://flagger.app

helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```

When deployed, the load tester API will be available at `http://flagger-loadtester.test/`.

Now you can add webhooks to the canary analysis spec:

```yaml
webhooks:
  - name: load-test-get
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
  - name: load-test-post
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
```

When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.

If your workload is exposed outside the mesh with the Istio Gateway and TLS, you can point `hey` to the
public URL and use HTTP2.

```yaml
webhooks:
  - name: load-test-get
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```

The load tester can run arbitrary commands as long as the binary is present in the container image.
For example, if you want to replace `hey` with another CLI, you can create your own Docker image:

```dockerfile
FROM quay.io/stefanprodan/flagger-loadtester:<VER>

RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```
@@ -1,75 +0,0 @@
# Install Flagger

Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled.
If you are new to Istio, you can follow the GKE guide
[Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).

**Prerequisites**

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

### Install with Helm and Tiller

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Flagger in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
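
Before moving on, you can verify that the controller is up; a quick check, assuming the release name `flagger` used above:

```bash
kubectl -n istio-system rollout status deployment/flagger
kubectl -n istio-system logs deployment/flagger
```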

Enable **Slack** notifications:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```

### Install with kubectl

If you don't have Tiller, you can use the helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m > $HOME/flagger.yaml

# apply
kubectl apply -f $HOME/flagger.yaml
```

### Uninstall

To uninstall/delete the flagger release with Helm run:

```text
helm delete --purge flagger
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

> **Note** that on uninstall the Canary CRD will not be removed.
Deleting the CRD will make Kubernetes remove all the objects owned by Flagger like Istio virtual services,
Kubernetes deployments and ClusterIP services.

If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:

```text
kubectl delete crd canaries.flagger.app
```
@@ -1,48 +0,0 @@
# Install Grafana

Flagger comes with a Grafana dashboard made for monitoring the canary analysis.

### Install with Helm and Tiller

Add Flagger Helm repository:

```bash
helm repo add flagger https://flagger.app
```

Deploy Grafana in the _**istio-system**_ namespace:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
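
To reach the dashboard without exposing the service, you can port-forward it; a sketch, assuming the `flagger-grafana` release above:

```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```

Then open http://localhost:3000 and log in with the admin credentials set above.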

### Install with kubectl

If you don't have Tiller, you can use the helm template command and apply the generated yaml with kubectl:

```bash
# generate
helm template flagger/grafana \
--name flagger-grafana \
--namespace=istio-system \
--set user=admin \
--set password=admin > $HOME/flagger-grafana.yaml

# apply
kubectl apply -f $HOME/flagger-grafana.yaml
```

### Uninstall

To uninstall/delete the Grafana release with Helm run:

```text
helm delete --purge flagger-grafana
```

The command removes all the Kubernetes components associated with the chart and deletes the release.
@@ -1,460 +0,0 @@
|
||||
# Install Istio
|
||||
|
||||
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
|
||||
Let’s Encrypt TLS for ingress gateway on Google Kubernetes Engine.
|
||||
|
||||

|
||||
|
||||
### Prerequisites
|
||||
|
||||
You will be creating a cluster on Google’s Kubernetes Engine \(GKE\),
|
||||
if you don’t have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
|
||||
|
||||
Login into GCP, create a project and enable billing for it.
|
||||
|
||||
Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.
|
||||
|
||||
Set the default project \(replace `PROJECT_ID` with your own project\):
|
||||
|
||||
```text
|
||||
gcloud config set project PROJECT_ID
|
||||
```
|
||||
|
||||
Set the default compute region and zone:
|
||||
|
||||
```text
|
||||
gcloud config set compute/region europe-west3
|
||||
gcloud config set compute/zone europe-west3-a
|
||||
```
|
||||
|
||||
Enable the Kubernetes and Cloud DNS services for your project:
|
||||
|
||||
```text
|
||||
gcloud services enable container.googleapis.com
|
||||
gcloud services enable dns.googleapis.com
|
||||
```
|
||||
|
||||
Install the `kubectl` command-line tool:
|
||||
|
||||
```text
|
||||
gcloud components install kubectl
|
||||
```
|
||||
|
||||
Install the `helm` command-line tool:
|
||||
|
||||
```text
|
||||
brew install kubernetes-helm
|
||||
```
|
||||
|
||||
### GKE cluster setup
|
||||
|
||||
Create a cluster with three nodes using the latest Kubernetes version:
|
||||
|
||||
```bash
|
||||
k8s_version=$(gcloud container get-server-config --format=json \
|
||||
| jq -r '.validNodeVersions[0]')
|
||||
|
||||
gcloud container clusters create istio \
|
||||
--cluster-version=${k8s_version} \
|
||||
--zone=europe-west3-a \
|
||||
--num-nodes=3 \
|
||||
--machine-type=n1-highcpu-4 \
|
||||
--preemptible \
|
||||
--no-enable-cloud-logging \
|
||||
--disk-size=30 \
|
||||
--enable-autorepair \
|
||||
--scopes=gke-default,compute-rw,storage-rw
|
||||
```
|
||||
|
||||
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
|
||||
preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
|
||||
after a maximum of 24 hours.
|
||||
|
||||
Set up credentials for `kubectl`:
|
||||
|
||||
```bash
|
||||
gcloud container clusters get-credentials istio -z=europe-west3-a
|
||||
```
|
||||
|
||||
Create a cluster admin role binding:
|
||||
|
||||
```bash
|
||||
kubectl create clusterrolebinding "cluster-admin-$(whoami)" \
|
||||
--clusterrole=cluster-admin \
|
||||
--user="$(gcloud config get-value core/account)"
|
||||
```
|
||||
|
||||
Validate your setup with:
|
||||
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
### Cloud DNS setup
|
||||
|
||||
You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
|
||||
|
||||
Create a managed zone named `istio` in Cloud DNS \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
gcloud dns managed-zones create \
|
||||
--dns-name="example.com." \
|
||||
--description="Istio zone" "istio"
|
||||
```
|
||||
|
||||
Look up your zone's name servers:
|
||||
|
||||
```bash
|
||||
gcloud dns managed-zones describe istio
|
||||
```
|
||||
|
||||
Update your registrar's name server records with the records returned by the above command.
|
||||
|
||||
Wait for the name servers to change \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
watch dig +short NS example.com
|
||||
```
|
||||
|
||||
Create a static IP address named `istio-gateway-ip` in the same region as your GKE cluster:
|
||||
|
||||
```bash
|
||||
gcloud compute addresses create istio-gateway-ip --region europe-west3
|
||||
```
|
||||
|
||||
Find the static IP address:
|
||||
|
||||
```bash
|
||||
gcloud compute addresses describe istio-gateway-ip --region europe-west3
|
||||
```
|
||||
|
||||
Create the following DNS records \(replace `example.com` with your domain and set your Istio Gateway IP\):
|
||||
|
||||
```bash
|
||||
DOMAIN="example.com"
|
||||
GATEWAYIP="35.198.98.90"
|
||||
|
||||
gcloud dns record-sets transaction start --zone=istio
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
|
||||
|
||||
gcloud dns record-sets transaction add --zone=istio \
|
||||
--name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
|
||||
|
||||
gcloud dns record-sets transaction execute --zone istio
|
||||
```
|
||||
|
||||
Verify that the wildcard DNS is working \(replace `example.com` with your domain\):
|
||||
|
||||
```bash
|
||||
watch host test.example.com
|
||||
```

### Install Istio with Helm

Download the latest Istio release:

```bash
curl -L https://git.io/getLatestIstio | sh -
```

Navigate to the `istio-x.x.x` directory and copy `istioctl` to your local bin:

```bash
cd istio-x.x.x/
sudo cp ./bin/istioctl /usr/local/bin/istioctl
```

Apply the Istio CRDs:

```bash
kubectl apply -f ./install/kubernetes/helm/istio/templates/crds.yaml
```

Create a service account and a cluster role binding for Tiller:

```bash
kubectl apply -f ./install/kubernetes/helm/helm-service-account.yaml
```

Deploy Tiller in the `kube-system` namespace:

```bash
helm init --service-account tiller
```
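
Before installing any charts, you may want to wait for the Tiller deployment to become ready; a quick check with plain kubectl \(`tiller-deploy` is the deployment name that `helm init` creates\):

```bash
kubectl -n kube-system rollout status deployment/tiller-deploy
```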

Find the GKE IP ranges:

```bash
gcloud container clusters describe istio --zone=europe-west3-a \
  | grep -e clusterIpv4Cidr -e servicesIpv4Cidr
```
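
If you prefer the two CIDRs as a single comma-separated value ready to paste into the Helm configuration below, here is a sketch using gcloud's `value` formatter \(same cluster name and zone as above\):

```bash
# join clusterIpv4Cidr and servicesIpv4Cidr, e.g. "10.28.0.0/14,10.7.240.0/20"
IP_RANGES=$(gcloud container clusters describe istio --zone=europe-west3-a \
  --format='value[separator=","](clusterIpv4Cidr,servicesIpv4Cidr)')
echo ${IP_RANGES}
```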

You'll be using the IP ranges to allow unrestricted egress traffic for services running inside the service mesh.

Configure Istio with Prometheus, Jaeger, and cert-manager:

```yaml
global:
  nodePort: false
  proxy:
    # replace with your GKE IP ranges
    includeIPRanges: "10.28.0.0/14,10.7.240.0/20"

sidecarInjectorWebhook:
  enabled: true
  enableNamespacesByDefault: false

gateways:
  enabled: true
  istio-ingressgateway:
    replicaCount: 2
    autoscaleMin: 2
    autoscaleMax: 3
    # replace with your Istio Gateway IP
    loadBalancerIP: "35.198.98.90"
    type: LoadBalancer

pilot:
  enabled: true
  replicaCount: 1
  autoscaleMin: 1
  autoscaleMax: 1
  resources:
    requests:
      cpu: 500m
      memory: 1024Mi

grafana:
  enabled: true
  security:
    enabled: true
    adminUser: admin
    # change the password
    adminPassword: admin

prometheus:
  enabled: true

servicegraph:
  enabled: true

tracing:
  enabled: true
  jaeger:
    tag: 1.7

certmanager:
  enabled: true
```

Save the above file as `my-istio.yaml` and install Istio with Helm:

```bash
helm upgrade --install istio ./install/kubernetes/helm/istio \
  --namespace=istio-system \
  -f ./my-istio.yaml
```

Verify that the Istio workloads are running:

```bash
kubectl -n istio-system get pods
```

### Configure Istio Gateway with Let's Encrypt TLS



Create an Istio Gateway in the `istio-system` namespace with HTTPS redirect:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*"
      tls:
        mode: SIMPLE
        privateKey: /etc/istio/ingressgateway-certs/tls.key
        serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```

Save the above resource as `istio-gateway.yaml` and then apply it:

```bash
kubectl apply -f ./istio-gateway.yaml
```
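
You can confirm that the gateway resource was created \(the `Gateway` kind comes from the Istio CRDs applied earlier\):

```bash
kubectl -n istio-system get gateway public-gateway
```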

Create a service account with the Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):

```bash
GCP_PROJECT=my-gcp-project

gcloud iam service-accounts create dns-admin \
  --display-name=dns-admin \
  --project=${GCP_PROJECT}

gcloud iam service-accounts keys create ./gcp-dns-admin.json \
  --iam-account=dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
  --project=${GCP_PROJECT}

gcloud projects add-iam-policy-binding ${GCP_PROJECT} \
  --member=serviceAccount:dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
  --role=roles/dns.admin
```

Create a Kubernetes secret with the GCP Cloud DNS admin key:

```bash
kubectl create secret generic cert-manager-credentials \
  --from-file=./gcp-dns-admin.json \
  --namespace=istio-system
```

Create a Let's Encrypt issuer for Cloud DNS \(replace `email@example.com` with a valid email address and
`my-gcp-project` with your project ID\):

```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
  name: letsencrypt-prod
  namespace: istio-system
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: email@example.com
    privateKeySecretRef:
      name: letsencrypt-prod
    dns01:
      providers:
        - name: cloud-dns
          clouddns:
            serviceAccountSecretRef:
              name: cert-manager-credentials
              key: gcp-dns-admin.json
            project: my-gcp-project
```

Save the above resource as `letsencrypt-issuer.yaml` and then apply it:

```bash
kubectl apply -f ./letsencrypt-issuer.yaml
```
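
cert-manager should register an ACME account shortly after; you can inspect the issuer status with plain kubectl:

```bash
kubectl -n istio-system describe issuer letsencrypt-prod
```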

Create a wildcard certificate \(replace `example.com` with your domain\):

```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: istio-gateway
  namespace: istio-system
spec:
  secretName: istio-ingressgateway-certs
  issuerRef:
    name: letsencrypt-prod
  commonName: "*.example.com"
  acme:
    config:
      - dns01:
          provider: cloud-dns
        domains:
          - "*.example.com"
          - "example.com"
```

Save the above resource as `of-cert.yaml` and then apply it:

```bash
kubectl apply -f ./of-cert.yaml
```

In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:

```text
kubectl -n istio-system logs deployment/certmanager -f

Certificate issued successfully
Certificate istio-system/istio-gateway scheduled for renewal in 1438 hours
```

Recreate the Istio ingress gateway pods:

```bash
kubectl -n istio-system delete pods -l istio=ingressgateway
```
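
Once the new gateway pods are running, you can inspect the certificate served on port 443 from outside the cluster; a quick openssl sketch \(replace `grafana.example.com` with one of your hosts\):

```bash
# print the subject and validity dates of the served certificate
openssl s_client -connect grafana.example.com:443 \
  -servername grafana.example.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates
```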

Note that the Istio gateway doesn't reload the certificates from the TLS secret when cert-manager renews them.
Since the GKE cluster is made out of preemptible VMs, the gateway pods will be replaced once every 24 hours.
If you're not using preemptible nodes, you need to manually kill the gateway pods every two months,
before the certificate expires.
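
One way to automate this on non-preemptible clusters is a monthly cron entry on a machine with `kubectl` access to the cluster; a minimal sketch, assuming the renewed certificate is already in the secret by then:

```text
# crontab entry: at 03:00 on the 1st of every month, recycle the
# ingress gateway pods so they pick up the renewed certificate
0 3 1 * * kubectl -n istio-system delete pods -l istio=ingressgateway
```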

### Expose services outside the service mesh

To expose services via the Istio Gateway, you have to create a virtual service attached to the gateway.

Create a virtual service in the `istio-system` namespace for Grafana \(replace `example.com` with your domain\):

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: grafana
  namespace: istio-system
spec:
  hosts:
    - "grafana.example.com"
  gateways:
    - public-gateway.istio-system.svc.cluster.local
  http:
    - route:
        - destination:
            host: grafana
      timeout: 30s
```

Save the above resource as `grafana-virtual-service.yaml` and then apply it:

```bash
kubectl apply -f ./grafana-virtual-service.yaml
```

Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.

Check that HTTP/2 is enabled:

```bash
curl -I --http2 https://grafana.example.com

HTTP/2 200
content-type: text/html; charset=UTF-8
x-envoy-upstream-service-time: 3
server: envoy
```

@@ -1,39 +0,0 @@

# Alerting

### Slack

Flagger can be configured to send Slack notifications:

```bash
helm upgrade -i flagger flagger/flagger \
  --namespace=istio-system \
  --set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
  --set slack.channel=general \
  --set slack.user=flagger
```

Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected, and when the canary analysis fails or succeeds.



A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the
maximum number of failed checks:



### Prometheus Alert Manager

Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:

```yaml
- alert: canary_rollback
  expr: flagger_canary_status > 1
  for: 1m
  labels:
    severity: warning
  annotations:
    summary: "Canary failed"
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
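
To actually deliver this alert you would pair the rule with an Alertmanager route and receiver; a minimal sketch, assuming your own Slack webhook URL and a hypothetical receiver name:

```yaml
# Alertmanager configuration fragment
route:
  routes:
    - match:
        alertname: canary_rollback
      receiver: slack-canary
receivers:
  - name: slack-canary
    slack_configs:
      - api_url: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
        channel: '#general'
```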

@@ -1,69 +0,0 @@

# Monitoring

### Grafana

Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:

```bash
helm upgrade -i flagger-grafana flagger/grafana \
  --namespace=istio-system \
  --set url=http://prometheus:9090 \
  --set user=admin \
  --set password=admin
```

The dashboard shows the RED and USE metrics for the primary and canary workloads:



### Logging

Canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:

```text
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg

Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```

### Metrics

Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
the destination weight values:

```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1

# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo", namespace="test"} 1

# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary", namespace="test"} 95
flagger_canary_weight{workload="podinfo", namespace="test"} 5

# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
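
These gauges are straightforward to query; a couple of example PromQL expressions built on the metric names above:

```text
# number of failed canaries per namespace (status 2 means failed)
count(flagger_canary_status == 2) by (namespace)

# traffic weight currently routed to the canary workload
flagger_canary_weight{workload="podinfo", namespace="test"}
```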

@@ -1,208 +0,0 @@

# Canary Deployments

This guide shows you how to use Istio and Flagger to automate canary deployments.

Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
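
Before starting the analysis, you can check that the load tester is up; this assumes the deployment is named `flagger-loadtester`, matching the service referenced in the webhook below:

```bash
kubectl -n test rollout status deployment/flagger-loadtester
```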

Create a canary custom resource \(replace `example.com` with your own domain\):

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
  # HPA reference (optional)
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    port: 9898
    # Istio gateways (optional)
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
      - app.example.com
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
    # canary increment step
    # percentage (0-100)
    stepWeight: 10
    metrics:
      - name: istio_requests_total
        # minimum req success rate (non 5xx responses)
        # percentage (0-100)
        threshold: 99
        interval: 1m
      - name: istio_request_duration_seconds_bucket
        # maximum req duration P99
        # milliseconds
        threshold: 500
        interval: 30s
    # generate traffic during analysis
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```

Save the above resource as `podinfo-canary.yaml` and then apply it:

```bash
kubectl apply -f ./podinfo-canary.yaml
```

After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```

Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.4.0
```

Flagger detects that the deployment revision changed and starts a new rollout:

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  0
  Phase:          Succeeded
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 20
  Normal   Synced  2m    flagger  Advance podinfo.test canary weight 25
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 30
  Normal   Synced  1m    flagger  Advance podinfo.test canary weight 35
  Normal   Synced  55s   flagger  Advance podinfo.test canary weight 40
  Normal   Synced  45s   flagger  Advance podinfo.test canary weight 45
  Normal   Synced  35s   flagger  Advance podinfo.test canary weight 50
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to podinfo-primary.test
  Warning  Synced  15s   flagger  Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

You can monitor all canaries with:

```bash
watch kubectl get canaries --all-namespaces

NAMESPACE   NAME       STATUS        WEIGHT   LASTTRANSITIONTIME
test        podinfo    Progressing   15       2019-01-16T14:05:07Z
prod        frontend   Succeeded     0        2019-01-15T16:15:07Z
prod        backend    Failed        0        2019-01-14T17:05:07Z
```

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Create a tester pod and exec into it:

```bash
kubectl -n test run tester \
  --image=quay.io/stefanprodan/podinfo:1.2.1 \
  -- ./podinfo --port=9898

kubectl -n test exec -it tester-xx-xx sh
```

Generate HTTP 500 errors:

```bash
watch curl http://podinfo-canary:9898/status/500
```

Generate latency:

```bash
watch curl http://podinfo-canary:9898/delay/1
```

When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.

```text
kubectl -n test describe canary/podinfo

Status:
  Canary Weight:  0
  Failed Checks:  10
  Phase:          Failed
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 5
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 10
  Normal   Synced  3m    flagger  Advance podinfo.test canary weight 15
  Normal   Synced  3m    flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 55.06% < 99%
  Normal   Synced  2m    flagger  Halt podinfo.test advancement success rate 47.00% < 99%
  Normal   Synced  2m    flagger  (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
  Warning  Synced  1m    flagger  Rolling back podinfo.test failed checks threshold reached 10
  Warning  Synced  1m    flagger  Canary failed! Scaling down podinfo.test
```
@@ -1,16 +0,0 @@
/*
Copyright The Flagger Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

@@ -1,28 +0,0 @@

#!/usr/bin/env bash

# Copyright 2018 The Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}

${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
  github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
  flagger:v1alpha3 \
  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
@@ -1,48 +0,0 @@

#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..

DIFFROOT="${SCRIPT_ROOT}/pkg"
TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg"
_tmp="${SCRIPT_ROOT}/_tmp"

cleanup() {
  rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT

cleanup

mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"

"${SCRIPT_ROOT}/hack/update-codegen.sh"
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]
then
  echo "${DIFFROOT} up to date."
else
  echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh"
  exit 1
fi