Compare commits


169 Commits

Author  SHA1  Message  Date
stefanprodan  d4a94f03de  Publish website  2025-10-16 15:36:35 +00:00
stefanprodan  b3b7958790  Publish website  2025-10-16 15:35:36 +00:00
stefanprodan  200584f5cd  Publish website  2025-10-16 15:24:37 +00:00
stefanprodan  48a45a6142  Publish website  2025-10-16 15:03:05 +00:00
stefanprodan  550e768120  Publish website  2025-10-16 14:58:57 +00:00
stefanprodan  6b050a3702  Publish flagger-1.42.0.tgz grafana-1.7.0.tgz loadtester-0.36.0.tgz podinfo-6.1.4.tgz  2025-10-16 14:02:39 +00:00
aryan9600  c8055e652d  Publish flagger-1.41.0.tgz grafana-1.7.0.tgz loadtester-0.35.0.tgz podinfo-6.1.4.tgz  2025-04-02 11:48:15 +00:00
stefanprodan  1f788e4980  Publish flagger-1.40.0.tgz grafana-1.7.0.tgz loadtester-0.34.0.tgz podinfo-6.1.4.tgz  2024-12-17 09:46:23 +00:00
stefanprodan  fae2dc52bb  Publish flagger-1.39.0.tgz grafana-1.7.0.tgz loadtester-0.33.0.tgz podinfo-6.1.4.tgz  2024-11-26 12:08:45 +00:00
stefanprodan  b8bce87149  Publish flagger-1.39.0.tgz grafana-1.7.0.tgz loadtester-0.33.0.tgz podinfo-6.1.4.tgz  2024-11-26 11:48:33 +00:00
aryan9600  4ccf2beba7  Publish flagger-1.38.0.tgz grafana-1.7.0.tgz loadtester-0.33.0.tgz podinfo-6.1.3.tgz  2024-07-30 13:42:54 +00:00
stefanprodan  b7a1fb69f2  Publish flagger-1.37.0.tgz grafana-1.7.0.tgz loadtester-0.32.0.tgz podinfo-6.1.3.tgz  2024-03-26 13:44:54 +00:00
stefanprodan  c1c9e5cc43  Publish flagger-1.37.0.tgz grafana-1.7.0.tgz loadtester-0.32.0.tgz podinfo-6.1.3.tgz  2024-03-26 13:01:02 +00:00
aryan9600  b83f69b75c  Publish flagger-1.36.1.tgz grafana-1.7.0.tgz loadtester-0.31.0.tgz podinfo-6.1.3.tgz  2024-03-06 08:16:19 +00:00
stefanprodan  b816e58c7c  Publish flagger-1.36.0.tgz grafana-1.7.0.tgz loadtester-0.31.0.tgz podinfo-6.1.3.tgz  2024-02-07 19:44:52 +00:00
aryan9600  a8911b2fc7  Publish flagger-1.35.0.tgz grafana-1.7.0.tgz loadtester-0.30.0.tgz podinfo-6.1.3.tgz  2023-11-30 15:39:49 +00:00
aryan9600  d5ae4d3467  Publish flagger-1.34.0.tgz grafana-1.7.0.tgz loadtester-0.29.0.tgz podinfo-6.1.3.tgz  2023-10-04 10:30:21 +00:00
stefanprodan  0381f85425  Publish flagger-1.33.0.tgz grafana-1.7.0.tgz loadtester-0.29.0.tgz podinfo-6.1.3.tgz  2023-08-29 14:56:19 +00:00
stefanprodan  e65f125ee5  Publish flagger-1.33.0.tgz grafana-1.7.0.tgz loadtester-0.29.0.tgz podinfo-6.1.3.tgz  2023-08-29 14:03:17 +00:00
aryan9600  cf3963a62a  Publish flagger-1.33.0.tgz grafana-1.7.0.tgz loadtester-0.29.0.tgz podinfo-6.1.3.tgz  2023-08-29 11:57:46 +00:00
aryan9600  c5b6f50cad  Publish flagger-1.32.0.tgz grafana-1.7.0.tgz loadtester-0.28.1.tgz podinfo-6.1.3.tgz  2023-07-14 10:29:06 +00:00
stefanprodan  a9ee2aa455  Publish flagger-1.31.0.tgz grafana-1.7.0.tgz loadtester-0.28.1.tgz podinfo-6.1.3.tgz  2023-05-10 17:07:50 +00:00
aryan9600  b1bcf3c010  Publish flagger-1.30.0.tgz grafana-1.7.0.tgz loadtester-0.28.1.tgz podinfo-6.1.3.tgz  2023-04-12 15:26:05 +00:00
aryan9600  8dba425b2b  Publish flagger-1.29.0.tgz grafana-1.7.0.tgz loadtester-0.28.1.tgz podinfo-6.1.3.tgz  2023-02-21 09:01:27 +00:00
stefanprodan  82e9c1fda2  Publish flagger-1.28.0.tgz grafana-1.7.0.tgz loadtester-0.28.1.tgz podinfo-6.1.3.tgz  2023-01-26 12:24:51 +00:00
aryan9600  1424e8f2bf  Publish flagger-1.27.0.tgz grafana-1.7.0.tgz loadtester-0.28.0.tgz podinfo-6.1.3.tgz  2022-12-16 10:17:17 +00:00
stefanprodan  83feb035b8  Publish flagger-1.26.0.tgz grafana-1.7.0.tgz loadtester-0.27.0.tgz podinfo-6.1.3.tgz  2022-11-23 14:56:42 +00:00
aryan9600  88d65a19a9  Publish flagger-1.25.0.tgz grafana-1.7.0.tgz loadtester-0.26.0.tgz podinfo-6.1.3.tgz  2022-11-17 05:14:21 +00:00
stefanprodan  fe34721a74  Publish flagger-1.24.1.tgz grafana-1.7.0.tgz loadtester-0.26.0.tgz podinfo-6.1.3.tgz  2022-10-26 14:46:51 +00:00
stefanprodan  fbc4fb21a3  Publish flagger-1.24.0.tgz grafana-1.7.0.tgz loadtester-0.26.0.tgz podinfo-6.1.3.tgz  2022-10-23 13:27:46 +00:00
stefanprodan  1378300d06  Publish flagger-1.24.0.tgz grafana-1.7.0.tgz loadtester-0.26.0.tgz podinfo-6.1.3.tgz  2022-10-23 11:13:01 +00:00
stefanprodan  4fe36885eb  Publish flagger-1.23.0.tgz grafana-1.7.0.tgz loadtester-0.25.0.tgz podinfo-6.1.3.tgz  2022-10-20 11:41:10 +00:00
aryan9600  8cd370ee0a  Publish flagger-1.22.2.tgz grafana-1.7.0.tgz loadtester-0.24.0.tgz podinfo-6.1.3.tgz  2022-08-29 15:36:20 +00:00
stefanprodan  1b4c5067c9  Publish website  2022-08-23 14:19:34 +00:00
stefanprodan  b00c8b97d9  Publish flagger-1.22.1.tgz grafana-1.7.0.tgz loadtester-0.22.0.tgz podinfo-6.1.3.tgz  2022-08-01 10:39:51 +00:00
stefanprodan  55587026d8  Publish flagger-1.22.0.tgz grafana-1.7.0.tgz loadtester-0.22.0.tgz podinfo-6.1.3.tgz  2022-07-12 11:36:19 +00:00
stefanprodan  1a538100e7  Publish flagger-1.21.0.tgz grafana-1.7.0.tgz loadtester-0.22.0.tgz podinfo-6.1.3.tgz  2022-05-06 16:49:06 +00:00
stefanprodan  0394f94200  Publish flagger-1.20.0.tgz grafana-1.7.0.tgz loadtester-0.22.0.tgz podinfo-6.1.3.tgz  2022-04-15 10:59:52 +00:00
stefanprodan  6333bc45d3  Publish flagger-1.19.0.tgz grafana-1.6.0.tgz loadtester-0.22.0.tgz podinfo-6.0.0.tgz  2022-03-14 14:27:03 +00:00
stefanprodan  3ab45bd3e8  Publish flagger-1.18.0.tgz grafana-1.6.0.tgz loadtester-0.21.0.tgz podinfo-5.0.0.tgz  2022-02-14 14:01:06 +00:00
stefanprodan  39eade6319  Publish website  2022-02-14 13:32:47 +00:00
stefanprodan  d30f82f3d6  Publish flagger-1.17.0.tgz grafana-1.6.0.tgz loadtester-0.21.0.tgz podinfo-5.0.0.tgz  2022-01-25 09:27:05 +00:00
stefanprodan  b075d4b797  Publish flagger-1.16.1.tgz grafana-1.6.0.tgz loadtester-0.21.0.tgz podinfo-5.0.0.tgz  2021-12-17 16:11:22 +00:00
stefanprodan  3d638326d9  Publish flagger-1.16.0.tgz grafana-1.6.0.tgz loadtester-0.20.0.tgz podinfo-5.0.0.tgz  2021-11-22 14:09:53 +00:00
stefanprodan  29486e31d8  Publish flagger-1.15.0.tgz grafana-1.6.0.tgz loadtester-0.20.0.tgz podinfo-5.0.0.tgz  2021-10-29 07:25:49 +00:00
dholbach  f5cd13cc78  Publish website  2021-10-05 07:40:04 +00:00
stefanprodan  f211a898c4  Publish flagger-1.14.0.tgz grafana-1.6.0.tgz loadtester-0.19.2.tgz podinfo-5.0.0.tgz  2021-09-20 09:31:36 +00:00
stefanprodan  2028cac167  Publish website  2021-09-20 08:50:00 +00:00
stefanprodan  4a1fa4edad  Publish flagger-1.13.0.tgz grafana-1.6.0.tgz loadtester-0.19.2.tgz podinfo-5.0.0.tgz  2021-08-27 07:02:33 +00:00
stefanprodan  1e69a78648  Publish flagger-1.13.0.tgz grafana-1.6.0.tgz loadtester-0.19.1.tgz podinfo-5.0.0.tgz  2021-08-25 10:34:22 +00:00
stefanprodan  9fe712d618  Publish website  2021-08-23 12:30:54 +00:00
stefanprodan  cab0ad1273  Publish flagger-1.12.1.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-06-24 10:07:20 +00:00
stefanprodan  edbc023fa6  Publish flagger-1.12.0.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-06-16 10:38:09 +00:00
stefanprodan  3bd46efa7f  Publish flagger-1.11.0.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-06-01 11:19:44 +00:00
stefanprodan  ce5fad4724  Publish flagger-1.10.0.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-05-28 06:51:11 +00:00
stefanprodan  a943e7cf10  Publish flagger-1.9.0.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-05-14 14:01:17 +00:00
Stefan Prodan  58d618ed82  Docs cleanup (Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>)  2021-05-06 11:27:12 +03:00
stefanprodan  d10ff53531  Publish flagger-1.8.0.tgz grafana-1.5.0.tgz loadtester-0.19.0.tgz podinfo-5.0.0.tgz  2021-04-29 13:29:49 +00:00
stefanprodan  9fd93e019f  Publish flagger-1.7.0.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-03-23 09:52:46 +00:00
stefanprodan  623c8193a1  Publish website  2021-02-26 15:05:24 +00:00
stefanprodan  6ae108a955  Publish website  2021-02-26 14:39:48 +00:00
stefanprodan  fc5e240ffe  Publish flagger-1.6.4.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-02-26 13:47:08 +00:00
stefanprodan  3b3cb55f0e  Publish flagger-1.6.3.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-02-15 13:33:44 +00:00
Stefan Prodan  983a68f9a1  Remove deprecated charts (podinfo and gateway) (Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>)  2021-02-05 09:59:57 +02:00
stefanprodan  d064fa62b6  Publish flagger-1.6.2.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-01-28 08:32:34 +00:00
stefanprodan  c530d694fc  Publish flagger-1.6.1.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-01-19 09:53:27 +00:00
stefanprodan  b5e374b873  Publish flagger-1.6.0.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2021-01-05 13:23:53 +00:00
stefanprodan  89eac0b361  Publish flagger-1.5.0.tgz grafana-1.5.0.tgz loadtester-0.18.0.tgz podinfo-5.0.0.tgz  2020-12-22 16:34:23 +00:00
weaveworksbot  f3451820fd  Publish Helm charts vv1.4.2  2020-12-09 13:07:16 +00:00
weaveworksbot  2c9ca4a817  Publish Helm charts vv1.4.1  2020-12-08 13:12:13 +00:00
weaveworksbot  7e238c129a  Publish website  2020-12-07 10:14:31 +00:00
weaveworksbot  6884e4184d  Publish Helm charts vv1.4.0  2020-12-07 10:03:38 +00:00
weaveworksbot  68148f4f33  Publish Helm charts vv1.3.0  2020-11-23 12:56:28 +00:00
weaveworksbot  75e3e652ef  Publish Helm charts vv1.2.0  2020-09-29 06:49:18 +00:00
weaveworksbot  c9c97c379d  Publish website  2020-08-20 06:16:09 +00:00
weaveworksbot  b102a046a7  Publish Helm charts vv1.1.0  2020-08-19 15:41:37 +00:00
weaveworksbot  f4094f1e1d  Publish Helm charts vv1.0.1  2020-07-18 06:48:04 +00:00
weaveworksbot  746848f362  Publish Helm charts vv1.0.0  2020-06-17 08:49:20 +00:00
weaveworksbot  feb869de03  Publish Helm charts vv1.0.0-rc.5  2020-05-14 12:36:48 +00:00
weaveworksbot  0ba873e310  Publish website  2020-05-08 10:22:10 +00:00
weaveworksbot  b44ecf7ded  Publish Helm charts vv1.0.0-rc.4  2020-04-03 09:48:28 +00:00
weaveworksbot  7324da08d7  Publish Helm charts vv1.0.0-rc.3  2020-03-23 11:23:53 +00:00
weaveworksbot  f08a0ffc5a  Publish Helm charts vv1.0.0-rc.2  2020-03-19 13:12:36 +00:00
weaveworksbot  e087d5f638  Publish website  2020-03-05 15:17:54 +00:00
weaveworksbot  a600f9f46c  Publish Helm charts vv1.0.0-rc.1  2020-03-04 18:53:07 +00:00
weaveworksbot  86d1c9f792  Publish Helm charts vv1.0.0-rc.1  2020-03-04 18:21:06 +00:00
stefanprodan  91d6161993  fix ci  2020-03-03 18:33:30 +02:00
Stefan Prodan  d750e737e0  Merge pull request #473 from dholbach/patch-1 (Update copyright)  2020-03-03 18:31:02 +02:00
Daniel Holbach  3597117bd5  Update copyright, fix CircleCI (thanks Stefan)  2020-03-03 17:31:09 +01:00
weaveworksbot  be1b12fac5  Publish Helm charts v0.23.0  2020-02-06 13:10:06 +00:00
weaveworksbot  a252d43a32  Publish Helm charts v0.22.0  2020-01-16 17:54:33 +00:00
weaveworksbot  a1f8b6fcd7  Publish website  2020-01-06 14:07:52 +00:00
weaveworksbot  edfe897112  Publish Helm charts v0.21.0  2020-01-06 11:03:09 +00:00
weaveworksbot  5947402048  Publish Helm charts v0.20.4  2019-12-03 08:09:28 +00:00
weaveworksbot  8739ab6973  Publish website  2019-11-13 13:28:17 +00:00
weaveworksbot  436d6374f0  Publish Helm charts v0.20.3  2019-11-13 12:33:20 +00:00
weaveworksbot  c280e235b5  Publish Helm charts v0.20.3  2019-11-12 11:22:18 +00:00
weaveworksbot  06dc29e79c  Publish Helm charts v0.20.3  2019-11-11 17:18:06 +00:00
weaveworksbot  5b6a257258  Publish Helm charts v0.20.2  2019-11-07 10:11:49 +00:00
weaveworksbot  ef404a0d81  Publish Helm charts v0.20.1  2019-11-03 11:08:04 +00:00
weaveworksbot  370a660be1  Publish Helm charts v0.20.0  2019-10-22 17:27:58 +00:00
weaveworksbot  83f570876d  Publish Helm charts v0.19.0  2019-10-08 10:34:26 +00:00
weaveworksbot  0d20480f34  Publish Helm charts v0.18.6  2019-10-03 12:23:53 +00:00
weaveworksbot  ab726411fd  Publish Helm charts v0.18.5  2019-10-02 15:33:20 +00:00
weaveworksbot  8c9b45fc4b  Publish website  2019-09-21 21:15:31 +00:00
Stefan Prodan  0c48ad8332  Update README.md  2019-09-08 11:44:30 +03:00
weaveworksbot  e9451e52a4  Publish Helm charts v0.18.4  2019-09-08 08:41:45 +00:00
stefanprodan  41b4135e6b  Publish load tester v0.7.0  2019-08-22 21:46:51 +03:00
weaveworksbot  e1b4264d1a  Publish Helm charts v0.18.3  2019-08-22 15:57:32 +00:00
weaveworksbot  1215310d9e  Publish website  2019-08-20 16:23:39 +00:00
weaveworksbot  d89cb68c2a  Publish website  2019-08-19 16:00:13 +00:00
weaveworksbot  5baf41e798  Publish website  2019-08-19 15:43:51 +00:00
weaveworksbot  b727088b0f  Publish website  2019-08-19 14:40:42 +00:00
weaveworksbot  7f47cb669e  Publish website  2019-08-19 14:31:43 +00:00
weaveworksbot  35bfb4cc83  Publish website  2019-08-19 14:29:22 +00:00
stefanprodan  432ca88ae9  Cleanup site  2019-08-19 17:12:05 +03:00
weaveworksbot  a632a93963  Publish website  2019-08-19 14:04:46 +00:00
weaveworksbot  40b3063821  Publish Helm charts v0.18.2  2019-08-05 16:02:26 +00:00
Stefan Prodan  774935c680  Update README.md  2019-07-30 13:58:29 +03:00
stefanprodan  9bac4fe592  Publish Helm chart charts/* 0.18.1  2019-07-30 10:53:53 +00:00
stefanprodan  237720d2c9  Publish 0.18.0 charts  2019-07-29 16:36:40 +03:00
Stefan Prodan  1ddff9e80e  Update README.md  2019-07-29 16:27:10 +03:00
Stefan Prodan  9b076a4cee  Update README.md  2019-07-10 09:19:58 +03:00
Stefan Prodan  50ef7a2c64  Update README.md  2019-07-10 09:18:05 +03:00
stefanprodan  d985083b18  Publish Helm chart charts/* 0.17.0  2019-07-10 06:09:57 +00:00
stefanprodan  c8de6c3433  CircleCI: ignore gh-pages  2019-06-24 19:27:37 +03:00
Stefan Prodan  ca129d2d01  Update README.md  2019-06-24 18:05:59 +03:00
stefanprodan  adb3d11b37  Publish Helm chart charts/* 0.16.0  2019-06-24 15:00:10 +00:00
stefanprodan  10688060bc  Publish Helm chart charts/* 0.16.0  2019-06-24 14:30:10 +00:00
stefanprodan  07d86b4f9f  Publish Helm chart charts/* 0.16.0  2019-06-24 10:55:26 +00:00
Stefan Prodan  236f7b5d31  Update README.md  2019-06-12 17:15:19 +03:00
stefanprodan  a28741c3ee  Publish Helm chart charts/* 0.15.0  2019-06-12 14:13:13 +00:00
Stefan Prodan  3f198d158f  Update README.md  2019-06-05 11:18:40 +03:00
stefanprodan  44377ea9c5  Publish Helm chart charts/* 0.14.1  2019-06-05 07:37:48 +00:00
Stefan Prodan  d793a38656  Update README.md  2019-05-21 14:26:07 +02:00
stefanprodan  33b0e712f1  Publish Helm chart charts/* 0.14.0  2019-05-21 12:23:18 +00:00
Stefan Prodan  1cc21d9fe3  Update README.md  2019-05-11 15:28:03 +03:00
stefanprodan  23bc8fe0df  Publish Helm chart charts/* 0.13.2  2019-05-11 12:20:14 +00:00
Stefan Prodan  b8bcfd1b0a  Update README.md  2019-05-09 14:49:22 +03:00
stefanprodan  c2120ceeb7  Publish Helm chart charts/* 0.13.1  2019-05-09 11:41:55 +00:00
Stefan Prodan  7286d5225f  Update README.md  2019-05-08 20:55:47 +03:00
stefanprodan  3a153fa534  Publish Helm chart charts/* 0.13.0  2019-05-08 17:54:34 +00:00
Stefan Prodan  eddf7c4e9b  Update README.md  2019-04-29 17:11:28 +03:00
stefanprodan  1661b8bc2f  Publish Helm chart charts/* 0.12.0  2019-04-29 14:10:08 +00:00
Stefan Prodan  79d034bace  Update README.md  2019-04-18 16:46:58 +03:00
stefanprodan  c318058fec  Publish Helm chart charts/* 0.11.1  2019-04-18 13:36:14 +00:00
Stefan Prodan  fab0bd0616  Update README.md  2019-04-17 11:35:00 +03:00
stefanprodan  71840ce2a0  Publish Helm chart charts/* 0.11.0  2019-04-17 08:26:23 +00:00
stefanprodan  83a216caca  Remove banner  2019-03-28 16:02:48 +02:00
stefanprodan  d7546d4579  Update banner and intro  2019-03-28 15:58:37 +02:00
Stefan Prodan  b0e646274b  Update README.md  2019-03-27 14:46:22 +02:00
stefanprodan  e790103cd3  Publish Helm chart charts/* 0.10.0  2019-03-27 12:37:34 +00:00
Stefan Prodan  0285647239  Update README.md  2019-03-11 15:59:20 +02:00
stefanprodan  d9f280750a  Publish Helm chart charts/* 0.9.0  2019-03-11 13:29:27 +00:00
Stefan Prodan  7f47441ce8  Update README.md  2019-03-06 21:45:20 +02:00
stefanprodan  2ad02f052a  Publish Helm chart charts/* 0.8.0  2019-03-06 19:39:09 +00:00
Stefan Prodan  b0eda2612f  Update README.md  2019-02-28 17:40:59 +02:00
stefanprodan  597449b68e  Publish Helm chart charts/* 0.7.0  2019-02-28 15:33:12 +00:00
Stefan Prodan  16d793b132  Update README.md  2019-02-26 02:04:59 +02:00
stefanprodan  e078fdffdf  Publish Helm chart charts/* 0.6.0  2019-02-25 23:47:39 +00:00
Stefan Prodan  8ba7997dd8  Update README.md  2019-02-14 13:25:13 +02:00
stefanprodan  4758c86787  Publish Helm chart charts/* 0.5.1  2019-02-14 11:19:01 +00:00
Stefan Prodan  eb6253354d  Update README.md  2019-01-30 15:08:07 +02:00
stefanprodan  47c8b6ee5d  Publish Helm chart charts/* 0.5.0  2019-01-30 13:01:48 +00:00
stefanprodan  193c21108e  Add og image  2019-01-24 13:25:12 +02:00
stefanprodan  a0a8aa0327  Publish Helm chart charts/* 0.4.1  2019-01-24 11:07:28 +00:00
Stefan Prodan  060ac5476e  Update README.md  2019-01-24 12:15:55 +02:00
Stefan Prodan  236a6ca0bf  Update README.md  2019-01-24 12:11:49 +02:00
stefanprodan  1d566fd1e3  Init Helm repo  2019-01-23 15:14:57 +02:00
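
The history above is the publishing log of a Helm chart repository served from this branch. A minimal consumption sketch, assuming the index is still served at the GitHub Pages URL that the Makefile's helm-package target (shown later in this diff) writes into it; the release name and istio-system namespace follow the Makefile's helm-up target:

  helm repo add flagger https://stefanprodan.github.io/flagger
  helm repo update
  helm upgrade -i flagger flagger/flagger --namespace istio-system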
2327 changed files with 4349 additions and 783850 deletions

.circleci/config.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
version: 2.1
jobs:
  build:
    machine: true
    steps:
      - run: "echo skip"
workflows:
  version: 2
  ignore:
    jobs:
      - build:
          filters:
            branches:
              ignore:
                - gh-pages
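
This config exists only to skip CI on the gh-pages branch (see the "CircleCI: ignore gh-pages" commit above). A quick sanity check for edits to it, assuming the CircleCI CLI is installed locally:

  circleci config validate .circleci/config.yml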


@@ -1,8 +0,0 @@
coverage:
  status:
    project:
      default:
        target: auto
        threshold: 50
        base: auto
    patch: off


@@ -1 +0,0 @@
root: ./docs/gitbook

.gitignore (vendored, 71 lines changed)

@@ -1,13 +1,62 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.DS_Store
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
# nyc test coverage
.nyc_output
# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
# next.js build output
.next
docs/.vuepress/dist/
bin/


@@ -1,14 +0,0 @@
builds:
  - main: ./cmd/flagger
    binary: flagger
    ldflags: -s -w -X github.com/stefanprodan/flagger/pkg/version.REVISION={{.Commit}}
    goos:
      - linux
    goarch:
      - amd64
    env:
      - CGO_ENABLED=0
archive:
  name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
  files:
    - none*


@@ -1,45 +0,0 @@
sudo: required
language: go
go:
  - 1.11.x
services:
  - docker
addons:
  apt:
    packages:
      - docker-ce
script:
  - set -e
  - make test-fmt
  - make test-codegen
  - go test -race -coverprofile=coverage.txt -covermode=atomic ./pkg/controller/
  - make build
after_success:
  - if [ -z "$DOCKER_USER" ]; then
      echo "PR build, skipping image push";
    else
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
      docker push quay.io/stefanprodan/flagger:${TRAVIS_COMMIT};
    fi
  - if [ -z "$TRAVIS_TAG" ]; then
      echo "Not a release, skipping image push";
    else
      docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
      echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
      docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
    fi
  - bash <(curl -s https://codecov.io/bash)
  - rm coverage.txt
deploy:
  - provider: script
    skip_cleanup: true
    script: curl -sL http://git.io/goreleaser | bash
    on:
      tags: true

404.html (new file, 26 lines)

@@ -0,0 +1,26 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title>Flagger</title>
<meta name="generator" content="VuePress 1.9.10">
<link rel="icon" href="/favicon.png">
<link rel="stylesheet" href="/website.css">
<meta name="description" content="Progressive Delivery operator for Kubernetes (Canary, A/B Testing and Blue/Green deployments)">
<meta name="keywords" content="gitops kubernetes flagger flux istio linkerd appmesh contour gloo nginx skipper traefik">
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:title" content="Flagger">
<meta name="twitter:description" content="Progressive delivery Kubernetes operator (Canary, A/B Testing and Blue/Green deployments)">
<meta name="twitter:image:src" content="https://flagger.app/flagger-overview.png">
<link rel="preload" href="/assets/css/0.styles.6bc4203f.css" as="style"><link rel="preload" href="/assets/js/app.a29d2723.js" as="script"><link rel="preload" href="/assets/js/7.e62ef287.js" as="script"><link rel="prefetch" href="/assets/js/1.231ad2b8.js"><link rel="prefetch" href="/assets/js/10.f8611bf6.js"><link rel="prefetch" href="/assets/js/11.88ab7ab3.js"><link rel="prefetch" href="/assets/js/12.fd171bf8.js"><link rel="prefetch" href="/assets/js/13.9a0f9bc6.js"><link rel="prefetch" href="/assets/js/14.31f4ed2f.js"><link rel="prefetch" href="/assets/js/15.0445c5e3.js"><link rel="prefetch" href="/assets/js/16.51c9c6ac.js"><link rel="prefetch" href="/assets/js/17.81fe6f29.js"><link rel="prefetch" href="/assets/js/18.b08e7074.js"><link rel="prefetch" href="/assets/js/19.ad5e0e55.js"><link rel="prefetch" href="/assets/js/2.29a21a78.js"><link rel="prefetch" href="/assets/js/20.1dc9b2d7.js"><link rel="prefetch" href="/assets/js/21.d29d05f0.js"><link rel="prefetch" href="/assets/js/22.9ec5f7d7.js"><link rel="prefetch" href="/assets/js/3.bff69ddf.js"><link rel="prefetch" href="/assets/js/4.9ac45811.js"><link rel="prefetch" href="/assets/js/5.fd30eb93.js"><link rel="prefetch" href="/assets/js/6.4f9d90d2.js"><link rel="prefetch" href="/assets/js/vendors~docsearch.46a2d037.js">
<link rel="stylesheet" href="/assets/css/0.styles.6bc4203f.css">
</head>
<body>
<div id="app" data-server-rendered="true"><div class="theme-container"><div class="theme-default-content"><h1>404</h1> <blockquote>How did we get here?</blockquote> <a href="/" class="router-link-active">
Take me home.
</a></div></div><div class="global-ui"></div></div>
<script src="/assets/js/app.a29d2723.js" defer></script><script src="/assets/js/7.e62ef287.js" defer></script>
</body>
</html>

@@ -1,72 +0,0 @@
# How to Contribute
Flagger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
pull requests. This document outlines some of the conventions on development
workflow, commit message formatting, contact points and other resources to make
it easier to get your contribution accepted.
We gratefully welcome improvements to documentation as well as to code.
## Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.
## Chat
The project uses Slack: to join the conversation, join the
[Weave community](https://slack.weave.works/) Slack workspace.
## Getting Started
- Fork the repository on GitHub
- If you want to contribute as a developer, continue reading this document for further instructions
- If you have questions or concerns, get stuck, or need a hand, let us know
on the Slack channel. We are happy to help and look forward to having
you as part of the team, in whatever capacity.
- Play with the project, submit bugs, submit pull requests!
## Contribution workflow
This is a rough outline of how to prepare a contribution:
- Create a topic branch from where you want to base your work (usually branched from master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- If you changed code:
- add automated tests to cover your changes
- Submit a pull request to the original repository.
## Acceptance policy
These things will make a PR more likely to be accepted:
- a well-described requirement
- new code and tests follow the conventions in old code and tests
- a good commit message (see below)
- All code must abide by [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments)
- Names should follow [What's in a name](https://talks.golang.org/2014/names.slide#1)
- Code must build on both Linux and Darwin, via plain `go build`
- Code should have appropriate test coverage and tests should be written
to work with `go test`
In general, we will merge a PR once one maintainer has endorsed it.
For substantial changes, more people may become involved, and you might
get asked to resubmit the PR or divide the changes into more than one PR.
### Format of the Commit Message
For Flux we prefer the following rules for good commit messages:
- Limit the subject to 50 characters and write as the continuation
of the sentence "If applied, this commit will ..."
- Explain what and why in the body, if more than a trivial change;
wrap it at 72 characters.
The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.
This doc is adapted from the [Weaveworks Flux](https://github.com/weaveworks/flux/blob/master/CONTRIBUTING.md)
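
A sketch of a commit following the rules above; the subject and body here are hypothetical, and -s adds the Signed-off-by trailer the DCO section asks for:

  git commit -s \
    -m "Add retry logic to canary promotion" \
    -m "Explain what changed and why here, wrapping the body at 72 characters."

The first -m is the sub-50-character subject; the second is the body.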


@@ -1,29 +0,0 @@
FROM golang:1.11
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
COPY . .
RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
-X github.com/stefanprodan/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-a -installsuffix cgo -o flagger ./cmd/flagger/*
FROM alpine:3.8
RUN addgroup -S flagger \
&& adduser -S -g flagger flagger \
&& apk --no-cache add ca-certificates
WORKDIR /home/flagger
COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .
RUN chown -R flagger:flagger ./
USER flagger
ENTRYPOINT ["./flagger"]
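
Building the image from this two-stage Dockerfile mirrors the Makefile's build target later in this diff; the tag is illustrative:

  docker build -t stefanprodan/flagger:latest . -f Dockerfile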

Gopkg.lock (generated, 737 lines removed)

@@ -1,737 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
name = "cloud.google.com/go"
packages = ["compute/metadata"]
pruneopts = "NUT"
revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
version = "v0.28.0"
[[projects]]
branch = "master"
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "NUT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "NUT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af"
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys",
]
pruneopts = "NUT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
branch = "master"
digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "NUT"
revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
source = "github.com/istio/glog"
[[projects]]
branch = "master"
digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "NUT"
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
[[projects]]
digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "NUT"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "NUT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/cmpopts",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value",
]
pruneopts = "NUT"
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "NUT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions",
]
pruneopts = "NUT"
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "NUT"
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
[[projects]]
digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "NUT"
revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
version = "v0.5.0"
[[projects]]
digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "NUT"
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
version = "v0.3.6"
[[projects]]
branch = "master"
digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
name = "github.com/istio/glog"
packages = ["."]
pruneopts = "NUT"
revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
[[projects]]
digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "NUT"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
digest = "1:03a74b0d86021c8269b52b7c908eb9bb3852ff590b363dad0a807cf58cec2f89"
name = "github.com/knative/pkg"
packages = [
"apis",
"apis/duck",
"apis/duck/v1alpha1",
"apis/istio",
"apis/istio/authentication",
"apis/istio/authentication/v1alpha1",
"apis/istio/common/v1alpha1",
"apis/istio/v1alpha3",
"client/clientset/versioned",
"client/clientset/versioned/fake",
"client/clientset/versioned/scheme",
"client/clientset/versioned/typed/authentication/v1alpha1",
"client/clientset/versioned/typed/authentication/v1alpha1/fake",
"client/clientset/versioned/typed/duck/v1alpha1",
"client/clientset/versioned/typed/duck/v1alpha1/fake",
"client/clientset/versioned/typed/istio/v1alpha3",
"client/clientset/versioned/typed/istio/v1alpha3/fake",
"signals",
]
pruneopts = "NUT"
revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "NUT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "NUT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "NUT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
branch = "master"
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = "NUT"
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = "NUT"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp",
]
pruneopts = "NUT"
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "NUT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "NUT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
[[projects]]
branch = "master"
digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "NUT"
revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"
[[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
[[projects]]
digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
name = "go.uber.org/atomic"
packages = ["."]
pruneopts = "NUT"
revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
version = "v1.3.2"
[[projects]]
digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e"
name = "go.uber.org/multierr"
packages = ["."]
pruneopts = "NUT"
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
version = "v1.1.0"
[[projects]]
digest = "1:85674ac609b704fd4e9f463553b6ffc3a3527a993ae0ba550eb56beaabdfe094"
name = "go.uber.org/zap"
packages = [
".",
"buffer",
"internal/bufferpool",
"internal/color",
"internal/exit",
"zapcore",
]
pruneopts = "NUT"
revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
version = "v1.9.1"
[[projects]]
branch = "master"
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
[[projects]]
branch = "master"
digest = "1:1400b8e87c2c9bd486ea1a13155f59f8f02d385761206df05c0b7db007a53b2c"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
]
pruneopts = "NUT"
revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
[[projects]]
branch = "master"
digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt",
]
pruneopts = "NUT"
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
[[projects]]
branch = "master"
digest = "1:44261e94b6095310a2df925fd68632d399a00eb153b52566a7b3697f7c70638c"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "1561086e645b2809fb9f8a1e2a38160bf8d53bf4"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "NUT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
digest = "1:45751dc3302c90ea55913674261b2d74286b05cdd8e3ae9606e02e4e77f4353f"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"imports",
"internal/fastwalk",
]
pruneopts = "NUT"
revision = "90fa682c2a6e6a37b3a1364ce2fe1d5e41af9d6d"
[[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
version = "v1.2.0"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "NUT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "NUT"
revision = "072894a440bdee3a891dea811fe42902311cd2a3"
version = "kubernetes-1.11.0"
[[projects]]
digest = "1:4b0d523ee389c762d02febbcfa0734c4530ebe87abe925db18f05422adcb33e8"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/sets/types",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
version = "kubernetes-1.11.0"
[[projects]]
digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
"kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/scheduling/v1beta1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"plugin/pkg/client/auth/gcp",
"rest",
"rest/watch",
"testing",
"third_party/forked/golang/template",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/record",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/jsonpath",
"util/retry",
"util/workqueue",
]
pruneopts = "NUT"
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "kubernetes-1.11.0"
[[projects]]
digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
name = "k8s.io/code-generator"
packages = [
"cmd/client-gen",
"cmd/client-gen/args",
"cmd/client-gen/generators",
"cmd/client-gen/generators/fake",
"cmd/client-gen/generators/scheme",
"cmd/client-gen/generators/util",
"cmd/client-gen/path",
"cmd/client-gen/types",
"cmd/deepcopy-gen",
"cmd/deepcopy-gen/args",
"cmd/defaulter-gen",
"cmd/defaulter-gen/args",
"cmd/informer-gen",
"cmd/informer-gen/args",
"cmd/informer-gen/generators",
"cmd/lister-gen",
"cmd/lister-gen/args",
"cmd/lister-gen/generators",
"pkg/util",
]
pruneopts = "T"
revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
version = "kubernetes-1.11.0"
[[projects]]
branch = "master"
digest = "1:5249c83f0fb9e277b2d28c19eca814feac7ef05dc762e4deaf0a2e4b1a7c5df3"
name = "k8s.io/gengo"
packages = [
"args",
"examples/deepcopy-gen/generators",
"examples/defaulter-gen/generators",
"examples/set-gen/sets",
"generator",
"namer",
"parser",
"types",
]
pruneopts = "NUT"
revision = "4242d8e6c5dba56827bb7bcf14ad11cda38f3991"
[[projects]]
branch = "master"
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "NUT"
revision = "e3762e86a74c878ffed47484592986685639c2cd"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/istio/glog",
"github.com/knative/pkg/apis/istio/v1alpha3",
"github.com/knative/pkg/client/clientset/versioned",
"github.com/knative/pkg/client/clientset/versioned/fake",
"github.com/knative/pkg/signals",
"github.com/prometheus/client_golang/prometheus/promhttp",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"k8s.io/api/apps/v1",
"k8s.io/api/autoscaling/v1",
"k8s.io/api/autoscaling/v2beta1",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/api/errors",
"k8s.io/apimachinery/pkg/api/resource",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/runtime/serializer",
"k8s.io/apimachinery/pkg/types",
"k8s.io/apimachinery/pkg/util/intstr",
"k8s.io/apimachinery/pkg/util/runtime",
"k8s.io/apimachinery/pkg/util/sets/types",
"k8s.io/apimachinery/pkg/util/wait",
"k8s.io/apimachinery/pkg/watch",
"k8s.io/client-go/discovery",
"k8s.io/client-go/discovery/fake",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/kubernetes/scheme",
"k8s.io/client-go/kubernetes/typed/core/v1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/rest",
"k8s.io/client-go/testing",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/client-go/tools/record",
"k8s.io/client-go/util/flowcontrol",
"k8s.io/client-go/util/workqueue",
"k8s.io/code-generator/cmd/client-gen",
"k8s.io/code-generator/cmd/deepcopy-gen",
"k8s.io/code-generator/cmd/defaulter-gen",
"k8s.io/code-generator/cmd/informer-gen",
"k8s.io/code-generator/cmd/lister-gen",
]
solver-name = "gps-cdcl"
solver-version = 1


@@ -1,64 +0,0 @@
required = [
"k8s.io/apimachinery/pkg/util/sets/types",
"k8s.io/code-generator/cmd/deepcopy-gen",
"k8s.io/code-generator/cmd/defaulter-gen",
"k8s.io/code-generator/cmd/client-gen",
"k8s.io/code-generator/cmd/lister-gen",
"k8s.io/code-generator/cmd/informer-gen",
]
[[constraint]]
name = "go.uber.org/zap"
version = "v1.9.1"
[[override]]
name = "gopkg.in/yaml.v2"
version = "v2.2.1"
[[override]]
name = "k8s.io/api"
version = "kubernetes-1.11.0"
[[override]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.11.0"
[[override]]
name = "k8s.io/code-generator"
version = "kubernetes-1.11.0"
[[override]]
name = "k8s.io/client-go"
version = "kubernetes-1.11.0"
[[override]]
name = "github.com/json-iterator/go"
# This is the commit at which k8s depends on this in 1.11
# It seems to be broken at HEAD.
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "v0.8.0"
[[constraint]]
name = "github.com/google/go-cmp"
version = "v0.2.0"
[[constraint]]
name = "github.com/knative/pkg"
revision = "c15d7c8f2220a7578b33504df6edefa948c845ae"
[[override]]
name = "github.com/golang/glog"
source = "github.com/istio/glog"
[prune]
go-tests = true
unused-packages = true
non-go = true
[[prune.project]]
name = "k8s.io/code-generator"
unused-packages = false
non-go = false
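
Gopkg.toml and the generated Gopkg.lock belong to dep, the dependency tool this project used before Go modules; after editing the constraints above, the lock file and vendor tree are refreshed with:

  dep ensure            # solve constraints, update Gopkg.lock and vendor/
  dep ensure -update    # re-resolve to the newest versions the constraints allow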

LICENSE (201 lines removed)

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Weaveworks. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,79 +0,0 @@
TAG?=latest
VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"')
VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | rev | cut -d'.' -f2- | rev)
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version

run:
	go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
	-metrics-server=https://prometheus.iowa.weavedx.com \
	-slack-url=https://hooks.slack.com/services/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 \
	-slack-channel="devops-alerts"

build:
	docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile

push:
	docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
	docker push quay.io/stefanprodan/flagger:$(VERSION)

fmt:
	gofmt -l -s -w $(SOURCE_DIRS)

test-fmt:
	gofmt -l -s $(SOURCE_DIRS) | grep ".*\.go"; if [ "$$?" = "0" ]; then exit 1; fi

test-codegen:
	./hack/verify-codegen.sh

test: test-fmt test-codegen
	go test ./...

helm-package:
	cd charts/ && helm package flagger/ && helm package grafana/
	mv charts/*.tgz docs/
	helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml

helm-up:
	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
	helm upgrade --install flagger-grafana ./charts/grafana --namespace=istio-system

version-set:
	@next="$(TAG)" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and charts"

version-up:
	@next="$(VERSION_MINOR).$$(($(PATCH) + 1))" && \
	current="$(VERSION)" && \
	sed -i '' "s/$$current/$$next/g" pkg/version/version.go && \
	sed -i '' "s/flagger:$$current/flagger:$$next/g" artifacts/flagger/deployment.yaml && \
	sed -i '' "s/tag: $$current/tag: $$next/g" charts/flagger/values.yaml && \
	sed -i '' "s/appVersion: $$current/appVersion: $$next/g" charts/flagger/Chart.yaml && \
	echo "Version $$next set in code, deployment and chart"

dev-up: version-up
	@echo "Starting build/push/deploy pipeline for $(VERSION)"
	docker build -t quay.io/stefanprodan/flagger:$(VERSION) . -f Dockerfile
	docker push quay.io/stefanprodan/flagger:$(VERSION)
	kubectl apply -f ./artifacts/flagger/crd.yaml
	helm upgrade -i flagger ./charts/flagger --namespace=istio-system --set crd.create=false

release:
	git tag $(VERSION)
	git push origin $(VERSION)

release-set: fmt version-set helm-package
	git add .
	git commit -m "Release $(VERSION)"
	git push origin master
	git tag $(VERSION)
	git push origin $(VERSION)

reset-test:
	kubectl delete -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/namespaces
	kubectl apply -f ./artifacts/canaries

503
README.md

@@ -1,444 +1,59 @@
# flagger
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![codecov](https://codecov.io/gh/stefanprodan/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
### Install
Before installing Flagger make sure you have Istio set up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Usage
Flagger takes a Kubernetes deployment and creates a series of objects
(Kubernetes [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/),
ClusterIP [services](https://kubernetes.io/docs/concepts/services-networking/service/) and
Istio [virtual services](https://istio.io/docs/reference/config/istio.networking.v1alpha3/#VirtualService))
to drive the canary analysis and promotion.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Gated canary promotion stages:
* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% (step weight)
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated (revision bump) and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* wait for the canary deployment to be updated (revision bump) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.
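For example, with the values used below, a rough back-of-the-envelope sketch (assuming every check passes) of how long a healthy canary takes to be promoted:
```bash
# Promotion time for a healthy canary, using the stepWeight/maxWeight/interval
# values from the example resource below (assumption: no failed checks)
step=5        # stepWeight (percent per increment)
max=50        # maxWeight (percent)
interval=60   # analysis interval in seconds (1m)
echo "increments: $(( max / step )), analysis time: $(( max / step * interval / 60 ))m"
# -> increments: 10, analysis time: 10m
```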
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
```
The canary analysis uses the following PromQL queries:
_HTTP requests success rate percentage_
```promql
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload"
}[$interval]
)
)
```
_HTTP requests milliseconds duration P99_
```promql
histogram_quantile(0.99,
sum(
irate(
istio_request_duration_seconds_bucket{
reporter="destination",
destination_workload=~"$workload",
destination_workload_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
The canary analysis can be extended with webhooks.
Flagger calls the webhooks with an HTTP POST request and uses the response status code to determine whether the canary check passed (HTTP 2xx) or failed.
Webhook payload:
```json
{
"name": "podinfo",
"namespace": "test",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
}
```
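To see what your endpoint will receive, you can replay this payload by hand from a pod inside the mesh; a minimal sketch, reusing the example webhook URL and payload from above:
```bash
# Simulate Flagger's webhook call (URL and payload taken from the examples above);
# any HTTP 2xx response code means the check would pass
curl -s -o /dev/null -w "%{http_code}\n" -X POST \
  -H 'Content-Type: application/json' \
  -d '{"name":"podinfo","namespace":"test","metadata":{"test":"all","token":"16688eb5e9f289f1991c"}}' \
  http://podinfo.test:9898/echo
```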
### Automated canary analysis, promotions and rollbacks
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create a canary promotion custom resource (replace the Istio gateway and the internet domain with your own):
```bash
kubectl apply -f ${REPO}/artifacts/canaries/canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
![flagger-canary-steps](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the deployment revision changed and starts a new canary analysis:
```
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 5 2019-01-16T14:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester --image=quay.io/stefanprodan/podinfo:1.2.1 -- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Last Transition Time: 2019-01-16T13:47:16Z
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Monitoring
Flagger comes with a Grafana dashboard made for canary analysis.
Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
The canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:
```
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo" namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary" namespace="test"} 95
flagger_canary_weight{workload="podinfo" namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
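These series can be queried like any other Prometheus metric; a sketch, assuming the in-cluster Prometheus address used earlier is reachable (for example via a port-forward):
```bash
# Fetch the last known canary status through the Prometheus HTTP API
# (flagger_canary_status: 0 - running, 1 - successful, 2 - failed)
curl -s 'http://prometheus.istio-system:9090/api/v1/query' \
  --data-urlencode 'query=flagger_canary_status{namespace="test"}' | jq '.data.result'
```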
### Alerting
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming webhook, Flagger will post messages when a canary deployment has been initialized,
when a new revision has been detected, and when the canary analysis fails or succeeds.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis
reaches the maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:
```yaml
- alert: canary_rollback
expr: flagger_canary_status > 1
for: 1m
labels:
severity: warning
annotations:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
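If you manage such rules as Prometheus rule files, they can be validated before loading; a minimal sketch, assuming the alert above is saved inside a rule group in a hypothetical file named canary-rules.yaml:
```bash
# Validate the alerting rule with promtool (bundled with Prometheus);
# canary-rules.yaml is a hypothetical file wrapping the alert above in a rule group
promtool check rules canary-rules.yaml
```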
### Roadmap
* Extend the validation mechanism to support other metrics than HTTP success rate and latency
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two
* Extend the canary analysis and promotion to other types than Kubernetes deployments such as Flux Helm releases or OpenFaaS functions
### Contributing
Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
When submitting bug reports please include as many details as possible:
* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)
# Flagger
Continuous delivery is accepted as an enterprise software practice, and is a natural evolution of well-established
continuous integration principles. However, continuous deployment remains notably rare,
perhaps due to the complexity of management and the fear of failed deployments impacting system availability.
Flagger is an open source Kubernetes operator that aims to untangle this complexity.
It automates the promotion of canary deployments by using
Istio, Linkerd, App Mesh, Gloo or NGINX traffic shifting and Prometheus metrics to analyse the application's behaviour during a controlled rollout.
The canary analysis can be extended with webhooks for running integration tests, load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP request success rate, average request duration and pod health. Based on the KPI analysis,
a canary is promoted or aborted and the analysis result is published to Slack or MS Teams.
Flagger can automate the application analysis for the following deployment strategies:
* Canary (progressive traffic shifting)
* A/B Testing (HTTP headers and cookies traffic routing)
* Blue/Green (traffic switch)
With Flagger you don't have to worry about keeping code and configuration changes in sync. Flagger keeps track of
ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are
synchronised.
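As a hypothetical illustration (the ConfigMap name below is an assumption, not taken from the examples in this repository), updating a ConfigMap referenced by the Deployment would start a new canary analysis just like a new container image does:
```bash
# Changing a referenced ConfigMap triggers a canary analysis
# (podinfo-config is a hypothetical name used only for illustration)
kubectl -n test create configmap podinfo-config \
  --from-literal=color=green --dry-run=client -o yaml | kubectl apply -f -
```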
### Documentation
Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)
* Install
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
* [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
* [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
* [Flagger install with SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
* How it works
* [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
* [Routing](https://docs.flagger.app/how-it-works#istio-routing)
* [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
* [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
* [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
* [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* [Integration testing](https://docs.flagger.app/how-it-works#integration-testing)
* [Manual gating](https://docs.flagger.app/how-it-works#manual-gating)
* [FAQ](https://docs.flagger.app/faq)
* Usage
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
* [Gloo canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
* [Blue/Green deployments](https://docs.flagger.app/usage/blue-green)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
This project is sponsored by [Weaveworks](https://www.weave.works/)


@@ -1,59 +0,0 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: https://httpbin.org/post
timeout: 1m
metadata:
test: "all"
token: "16688eb5e9f289f1991c"


@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.3.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi


@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99


@@ -1,35 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: flagger
namespace: istio-system
labels:
app: flagger
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: flagger
labels:
app: flagger
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ['*']
- nonResourceURLs: ['*']
verbs: ['*']
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: flagger
labels:
app: flagger
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flagger
subjects:
- kind: ServiceAccount
name: flagger
namespace: istio-system


@@ -1,115 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
spec:
required:
- targetRef
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
autoscalerRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
type: number
stepWeight:
type: number
metrics:
type: array
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"


@@ -1,64 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger
namespace: istio-system
labels:
app: flagger
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: flagger
template:
metadata:
labels:
app: flagger
annotations:
prometheus.io/scrape: "true"
spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.4.0
imagePullPolicy: Always
ports:
- name: http
containerPort: 8080
command:
- ./flagger
- -log-level=info
- -control-loop-interval=10s
- -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=2
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=2
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
limits:
memory: "512Mi"
cpu: "1000m"
requests:
memory: "32Mi"
cpu: "10m"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001


@@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
labels:
istio-injection: enabled


@@ -1,34 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- match:
- headers:
user-agent:
regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 0
- destination:
host: podinfo
port:
number: 9898
weight: 100
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100


@@ -1,25 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
mirror:
host: podinfo
port:
number: 9898


@@ -1,26 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
- destination:
host: podinfo
port:
number: 9898
weight: 0


@@ -1,75 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.2.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: green
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
resources:
limits:
cpu: 1000m
memory: 256Mi
requests:
cpu: 100m
memory: 16Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- emptyDir: {}
name: data


@@ -1,23 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 3
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99
- type: Resource
resource:
name: memory
targetAverageValue: 200Mi


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
type: ClusterIP
selector:
app: podinfo
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http


@@ -1,71 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo-primary
namespace: test
labels:
app: podinfo-primary
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo-primary
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo-primary
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.1.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 10m
memory: 64Mi


@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo-primary
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-primary
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo-primary
namespace: test
labels:
app: podinfo-primary
spec:
type: ClusterIP
selector:
app: podinfo-primary
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http


@@ -1,30 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.istio.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
- destination:
host: podinfo
port:
number: 9898
weight: 0
timeout: 10s
retries:
attempts: 3
perTryTimeout: 2s

File diff suppressed because one or more lines are too long


@@ -0,0 +1 @@
<?xml version="1.0" encoding="UTF-8"?><svg xmlns="http://www.w3.org/2000/svg" width="12" height="13"><g stroke-width="2" stroke="#aaa" fill="none"><path d="M11.29 11.71l-4-4"/><circle cx="5" cy="5" r="4"/></g></svg>


1
assets/js/1.231ad2b8.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/10.f8611bf6.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/11.88ab7ab3.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/12.fd171bf8.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/13.9a0f9bc6.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/14.31f4ed2f.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[14],{305:function(e,t,a){"use strict";var i=TypeError;e.exports=function(e,t){if(e<t)throw new i("Not enough arguments");return e}},306:function(e,t,a){},316:function(e,t,a){"use strict";var i=a(43),n=a(4),r=a(120),s=a(305),o=URLSearchParams,l=o.prototype,h=n(l.append),u=n(l.delete),c=n(l.forEach),p=n([].push),d=new o("a=1&a=2&b=3");d.delete("a",1),d.delete("b",void 0),d+""!="a=2"&&i(l,"delete",function(e){var t=arguments.length,a=t<2?void 0:arguments[1];if(t&&void 0===a)return u(this,e);var i=[];c(this,function(e,t){p(i,{key:t,value:e})}),s(t,1);for(var n,o=r(e),l=r(a),d=0,f=0,v=!1,g=i.length;d<g;)n=i[d++],v||n.key===o?(v=!0,u(this,n.key)):f++;for(;f<g;)(n=i[f++]).key===o&&n.value===l||h(this,n.key,n.value)},{enumerable:!0,unsafe:!0})},317:function(e,t,a){"use strict";var i=a(43),n=a(4),r=a(120),s=a(305),o=URLSearchParams,l=o.prototype,h=n(l.getAll),u=n(l.has),c=new o("a=1");!c.has("a",2)&&c.has("a",void 0)||i(l,"has",function(e){var t=arguments.length,a=t<2?void 0:arguments[1];if(t&&void 0===a)return u(this,e);var i=h(this,e);s(t,1);for(var n=r(a),o=0;o<i.length;)if(i[o++]===n)return!0;return!1},{enumerable:!0,unsafe:!0})},318:function(e,t,a){"use strict";var i=a(5),n=a(4),r=a(121),s=URLSearchParams.prototype,o=n(s.forEach);!i||"size"in s||r(s,"size",{get:function(){var e=0;return o(this,function(){e++}),e},configurable:!0,enumerable:!0})},319:function(e,t,a){"use strict";a(306)},328:function(e,t,a){"use strict";a.r(t);a(118),a(316),a(317),a(318);var i={name:"AlgoliaSearchBox",props:["options"],data:()=>({placeholder:void 0}),watch:{$lang(e){this.update(this.options,e)},options(e){this.update(e,this.$lang)}},mounted(){this.initialize(this.options,this.$lang),this.placeholder=this.$site.themeConfig.searchPlaceholder||""},methods:{initialize(e,t){Promise.all([Promise.all([a.e(0),a.e(9)]).then(a.t.bind(null,326,7)),Promise.all([a.e(0),a.e(9)]).then(a.t.bind(null,327,7))]).then(([a])=>{a=a.default;const{algoliaOptions:i={}}=e;a(Object.assign({},e,{inputSelector:"#algolia-search-input",algoliaOptions:{...i,facetFilters:[`lang:${t}`].concat(i.facetFilters||[])},handleSelected:(e,t,a)=>{const{pathname:i,hash:n}=new URL(a.url),r=i.replace(this.$site.base,"/"),s=decodeURIComponent(n);this.$router.push(`${r}${s}`)}}))})},update(e,t){this.$el.innerHTML='<input id="algolia-search-input" class="search-query">',this.initialize(e,t)}}},n=(a(319),a(26)),r=Object(n.a)(i,function(){var e=this._self._c;return e("form",{staticClass:"algolia-search-wrapper search-box",attrs:{id:"search-form",role:"search"}},[e("input",{staticClass:"search-query",attrs:{id:"algolia-search-input",placeholder:this.placeholder}})])},[],!1,null,null,null);t.default=r.exports}}]);

1
assets/js/15.0445c5e3.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/16.51c9c6ac.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/17.81fe6f29.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[17],{271:function(t,e,n){"use strict";n.d(e,"d",function(){return i}),n.d(e,"a",function(){return a}),n.d(e,"i",function(){return s}),n.d(e,"f",function(){return c}),n.d(e,"g",function(){return u}),n.d(e,"h",function(){return l}),n.d(e,"b",function(){return d}),n.d(e,"e",function(){return p}),n.d(e,"k",function(){return f}),n.d(e,"l",function(){return h}),n.d(e,"c",function(){return m}),n.d(e,"j",function(){return b});n(118),n(17),n(27),n(62),n(42);const i=/#.*$/,r=/\.(md|html)$/,a=/\/$/,s=/^[a-z]+:/i;function o(t){return decodeURI(t).replace(i,"").replace(r,"")}function c(t){return s.test(t)}function u(t){return/^mailto:/.test(t)}function l(t){return/^tel:/.test(t)}function d(t){if(c(t))return t;const e=t.match(i),n=e?e[0]:"",r=o(t);return a.test(r)?t:r+".html"+n}function p(t,e){const n=decodeURIComponent(t.hash),r=function(t){const e=t.match(i);if(e)return e[0]}(e);return(!r||n===r)&&o(t.path)===o(e)}function f(t,e,n){if(c(e))return{type:"external",path:e};n&&(e=function(t,e,n){const i=t.charAt(0);if("/"===i)return t;if("?"===i||"#"===i)return e+t;const r=e.split("/");n&&r[r.length-1]||r.pop();const a=t.replace(/^\//,"").split("/");for(let t=0;t<a.length;t++){const e=a[t];".."===e?r.pop():"."!==e&&r.push(e)}""!==r[0]&&r.unshift("");return r.join("/")}(e,n));const i=o(e);for(let e=0;e<t.length;e++)if(o(t[e].regularPath)===i)return Object.assign({},t[e],{type:"page",path:d(t[e].path)});return console.error(`[vuepress] No matching page found for sidebar item "${e}"`),{}}function h(t,e,n,i){const{pages:r,themeConfig:a}=n,s=i&&a.locales&&a.locales[i]||a;if("auto"===(t.frontmatter.sidebar||s.sidebar||a.sidebar))return g(t);const o=s.sidebar||a.sidebar;if(o){const{base:n,config:i}=function(t,e){if(Array.isArray(e))return{base:"/",config:e};for(const i in e)if(0===(n=t,/(\.html|\/)$/.test(n)?n:n+"/").indexOf(encodeURI(i)))return{base:i,config:e[i]};var n;return{}}(e,o);return"auto"===i?g(t):i?i.map(t=>(function t(e,n,i,r=1){if("string"==typeof e)return f(n,e,i);if(Array.isArray(e))return Object.assign(f(n,e[0],i),{title:e[1]});{const a=e.children||[];return 0===a.length&&e.path?Object.assign(f(n,e.path,i),{title:e.title}):{type:"group",path:e.path,title:e.title,sidebarDepth:e.sidebarDepth,initialOpenGroupIndex:e.initialOpenGroupIndex,children:a.map(e=>t(e,n,i,r+1)),collapsable:!1!==e.collapsable}}})(t,r,n)):[]}return[]}function g(t){const e=m(t.headers||[]);return[{type:"group",collapsable:!1,title:t.title,path:null,children:e.map(e=>({type:"auto",title:e.title,basePath:t.path,path:t.path+"#"+e.slug,children:e.children||[]}))}]}function m(t){let e;return(t=t.map(t=>Object.assign({},t))).forEach(t=>{2===t.level?e=t:e&&(e.children||(e.children=[])).push(t)}),t.filter(t=>2===t.level)}function b(t){return Object.assign(t,{type:t.items&&t.items.length?"links":"link"})}},278:function(t,e){t.exports=function(t){return null==t}},280:function(t,e,n){},291:function(t,e,n){"use strict";n(280)},299:function(t,e,n){"use strict";n.r(e);var i=n(278),r=n.n(i),a=n(271),s={name:"PageEdit",computed:{lastUpdated(){return this.$page.lastUpdated},lastUpdatedText(){return"string"==typeof this.$themeLocaleConfig.lastUpdated?this.$themeLocaleConfig.lastUpdated:"string"==typeof this.$site.themeConfig.lastUpdated?this.$site.themeConfig.lastUpdated:"Last Updated"},editLink(){const t=r()(this.$page.frontmatter.editLink)?this.$site.themeConfig.editLinks:this.$page.frontmatter.editLink,{repo:e,docsDir:n="",docsBranch:i="master",docsRepo:a=e}=this.$site.themeConfig;return 
t&&a&&this.$page.relativePath?this.createEditLink(e,a,n,i,this.$page.relativePath):null},editLinkText(){return this.$themeLocaleConfig.editLinkText||this.$site.themeConfig.editLinkText||"Edit this page"}},methods:{createEditLink(t,e,n,i,r){if(/bitbucket.org/.test(e)){return e.replace(a.a,"")+"/src"+`/${i}/`+(n?n.replace(a.a,"")+"/":"")+r+`?mode=edit&spa=0&at=${i}&fileviewer=file-view-default`}if(/gitlab.com/.test(e)){return e.replace(a.a,"")+"/-/edit"+`/${i}/`+(n?n.replace(a.a,"")+"/":"")+r}return(a.i.test(e)?e:`https://github.com/${e}`).replace(a.a,"")+"/edit"+`/${i}/`+(n?n.replace(a.a,"")+"/":"")+r}}},o=(n(291),n(26)),c=Object(o.a)(s,function(){var t=this,e=t._self._c;return e("footer",{staticClass:"page-edit"},[t.editLink?e("div",{staticClass:"edit-link"},[e("a",{attrs:{href:t.editLink,target:"_blank",rel:"noopener noreferrer"}},[t._v(t._s(t.editLinkText))]),t._v(" "),e("OutboundLink")],1):t._e(),t._v(" "),t.lastUpdated?e("div",{staticClass:"last-updated"},[e("span",{staticClass:"prefix"},[t._v(t._s(t.lastUpdatedText)+":")]),t._v(" "),e("span",{staticClass:"time"},[t._v(t._s(t.lastUpdated))])]):t._e()])},[],!1,null,null,null);e.default=c.exports}}]);

1
assets/js/18.b08e7074.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[18],{271:function(t,e,n){"use strict";n.d(e,"d",function(){return r}),n.d(e,"a",function(){return a}),n.d(e,"i",function(){return s}),n.d(e,"f",function(){return o}),n.d(e,"g",function(){return c}),n.d(e,"h",function(){return l}),n.d(e,"b",function(){return p}),n.d(e,"e",function(){return f}),n.d(e,"k",function(){return d}),n.d(e,"l",function(){return h}),n.d(e,"c",function(){return g}),n.d(e,"j",function(){return m});n(118),n(17),n(27),n(62),n(42);const r=/#.*$/,i=/\.(md|html)$/,a=/\/$/,s=/^[a-z]+:/i;function u(t){return decodeURI(t).replace(r,"").replace(i,"")}function o(t){return s.test(t)}function c(t){return/^mailto:/.test(t)}function l(t){return/^tel:/.test(t)}function p(t){if(o(t))return t;const e=t.match(r),n=e?e[0]:"",i=u(t);return a.test(i)?t:i+".html"+n}function f(t,e){const n=decodeURIComponent(t.hash),i=function(t){const e=t.match(r);if(e)return e[0]}(e);return(!i||n===i)&&u(t.path)===u(e)}function d(t,e,n){if(o(e))return{type:"external",path:e};n&&(e=function(t,e,n){const r=t.charAt(0);if("/"===r)return t;if("?"===r||"#"===r)return e+t;const i=e.split("/");n&&i[i.length-1]||i.pop();const a=t.replace(/^\//,"").split("/");for(let t=0;t<a.length;t++){const e=a[t];".."===e?i.pop():"."!==e&&i.push(e)}""!==i[0]&&i.unshift("");return i.join("/")}(e,n));const r=u(e);for(let e=0;e<t.length;e++)if(u(t[e].regularPath)===r)return Object.assign({},t[e],{type:"page",path:p(t[e].path)});return console.error(`[vuepress] No matching page found for sidebar item "${e}"`),{}}function h(t,e,n,r){const{pages:i,themeConfig:a}=n,s=r&&a.locales&&a.locales[r]||a;if("auto"===(t.frontmatter.sidebar||s.sidebar||a.sidebar))return b(t);const u=s.sidebar||a.sidebar;if(u){const{base:n,config:r}=function(t,e){if(Array.isArray(e))return{base:"/",config:e};for(const r in e)if(0===(n=t,/(\.html|\/)$/.test(n)?n:n+"/").indexOf(encodeURI(r)))return{base:r,config:e[r]};var n;return{}}(e,u);return"auto"===r?b(t):r?r.map(t=>(function t(e,n,r,i=1){if("string"==typeof e)return d(n,e,r);if(Array.isArray(e))return Object.assign(d(n,e[0],r),{title:e[1]});{const a=e.children||[];return 0===a.length&&e.path?Object.assign(d(n,e.path,r),{title:e.title}):{type:"group",path:e.path,title:e.title,sidebarDepth:e.sidebarDepth,initialOpenGroupIndex:e.initialOpenGroupIndex,children:a.map(e=>t(e,n,r,i+1)),collapsable:!1!==e.collapsable}}})(t,i,n)):[]}return[]}function b(t){const e=g(t.headers||[]);return[{type:"group",collapsable:!1,title:t.title,path:null,children:e.map(e=>({type:"auto",title:e.title,basePath:t.path,path:t.path+"#"+e.slug,children:e.children||[]}))}]}function g(t){let e;return(t=t.map(t=>Object.assign({},t))).forEach(t=>{2===t.level?e=t:e&&(e.children||(e.children=[])).push(t)}),t.filter(t=>2===t.level)}function m(t){return Object.assign(t,{type:t.items&&t.items.length?"links":"link"})}},277:function(t,e,n){},284:function(t,e,n){"use strict";n(277)},287:function(t,e,n){"use strict";n.r(e);n(17),n(42),n(117);var r=n(271);function i(t,e,n,r,i){const a={props:{to:e,activeClass:"",exactActiveClass:""},class:{active:r,"sidebar-link":!0}};return i>2&&(a.style={"padding-left":i+"rem"}),t("RouterLink",a,n)}function a(t,e,n,s,u,o=1){return!e||o>u?null:t("ul",{class:"sidebar-sub-headers"},e.map(e=>{const c=Object(r.e)(s,n+"#"+e.slug);return t("li",{class:"sidebar-sub-header"},[i(t,n+"#"+e.slug,e.title,c,e.level-1),a(t,e.children,n,s,u,o+1)])}))}var 
s={functional:!0,props:["item","sidebarDepth"],render(t,{parent:{$page:e,$site:n,$route:s,$themeConfig:u,$themeLocaleConfig:o},props:{item:c,sidebarDepth:l}}){const p=Object(r.e)(s,c.path),f="auto"===c.type?p||c.children.some(t=>Object(r.e)(s,c.basePath+"#"+t.slug)):p,d="external"===c.type?function(t,e,n){return t("a",{attrs:{href:e,target:"_blank",rel:"noopener noreferrer"},class:{"sidebar-link":!0}},[n,t("OutboundLink")])}(t,c.path,c.title||c.path):i(t,c.path,c.title||c.path,f),h=[e.frontmatter.sidebarDepth,l,o.sidebarDepth,u.sidebarDepth,1].find(t=>void 0!==t),b=o.displayAllHeaders||u.displayAllHeaders;if("auto"===c.type)return[d,a(t,c.children,c.basePath,s,h)];if((f||b)&&c.headers&&!r.d.test(c.path)){return[d,a(t,Object(r.c)(c.headers),c.path,s,h)]}return d}},u=(n(284),n(26)),o=Object(u.a)(s,void 0,void 0,!1,null,null,null);e.default=o.exports}}]);

1
assets/js/19.ad5e0e55.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[19],{272:function(t,e,n){},274:function(t,e,n){"use strict";n.r(e);var s={name:"DropdownTransition",methods:{setHeight(t){t.style.height=t.scrollHeight+"px"},unsetHeight(t){t.style.height=""}}},i=(n(275),n(26)),o=Object(i.a)(s,function(){return(0,this._self._c)("transition",{attrs:{name:"dropdown"},on:{enter:this.setHeight,"after-enter":this.unsetHeight,"before-leave":this.setHeight}},[this._t("default")],2)},[],!1,null,null,null);e.default=o.exports},275:function(t,e,n){"use strict";n(272)}}]);

1
assets/js/2.29a21a78.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/20.1dc9b2d7.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[20],{290:function(t,c,n){},302:function(t,c,n){"use strict";n(290)},315:function(t,c,n){"use strict";n.r(c);n(302);var i=n(26),s=Object(i.a)({},function(){var t=this,c=t._self._c;return c("div",{staticClass:"sidebar-button",on:{click:function(c){return t.$emit("toggle-sidebar")}}},[c("svg",{staticClass:"icon",attrs:{xmlns:"http://www.w3.org/2000/svg","aria-hidden":"true",role:"img",viewBox:"0 0 448 512"}},[c("path",{attrs:{fill:"currentColor",d:"M436 124H12c-6.627 0-12-5.373-12-12V80c0-6.627 5.373-12 12-12h424c6.627 0 12 5.373 12 12v32c0 6.627-5.373 12-12 12zm0 160H12c-6.627 0-12-5.373-12-12v-32c0-6.627 5.373-12 12-12h424c6.627 0 12 5.373 12 12v32c0 6.627-5.373 12-12 12zm0 160H12c-6.627 0-12-5.373-12-12v-32c0-6.627 5.373-12 12-12h424c6.627 0 12 5.373 12 12v32c0 6.627-5.373 12-12 12z"}})])])},[],!1,null,null,null);c.default=s.exports}}]);

1
assets/js/21.d29d05f0.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[21],{271:function(t,n,e){"use strict";e.d(n,"d",function(){return r}),e.d(n,"a",function(){return s}),e.d(n,"i",function(){return u}),e.d(n,"f",function(){return a}),e.d(n,"g",function(){return l}),e.d(n,"h",function(){return c}),e.d(n,"b",function(){return f}),e.d(n,"e",function(){return h}),e.d(n,"k",function(){return p}),e.d(n,"l",function(){return d}),e.d(n,"c",function(){return b}),e.d(n,"j",function(){return m});e(118),e(17),e(27),e(62),e(42);const r=/#.*$/,i=/\.(md|html)$/,s=/\/$/,u=/^[a-z]+:/i;function o(t){return decodeURI(t).replace(r,"").replace(i,"")}function a(t){return u.test(t)}function l(t){return/^mailto:/.test(t)}function c(t){return/^tel:/.test(t)}function f(t){if(a(t))return t;const n=t.match(r),e=n?n[0]:"",i=o(t);return s.test(i)?t:i+".html"+e}function h(t,n){const e=decodeURIComponent(t.hash),i=function(t){const n=t.match(r);if(n)return n[0]}(n);return(!i||e===i)&&o(t.path)===o(n)}function p(t,n,e){if(a(n))return{type:"external",path:n};e&&(n=function(t,n,e){const r=t.charAt(0);if("/"===r)return t;if("?"===r||"#"===r)return n+t;const i=n.split("/");e&&i[i.length-1]||i.pop();const s=t.replace(/^\//,"").split("/");for(let t=0;t<s.length;t++){const n=s[t];".."===n?i.pop():"."!==n&&i.push(n)}""!==i[0]&&i.unshift("");return i.join("/")}(n,e));const r=o(n);for(let n=0;n<t.length;n++)if(o(t[n].regularPath)===r)return Object.assign({},t[n],{type:"page",path:f(t[n].path)});return console.error(`[vuepress] No matching page found for sidebar item "${n}"`),{}}function d(t,n,e,r){const{pages:i,themeConfig:s}=e,u=r&&s.locales&&s.locales[r]||s;if("auto"===(t.frontmatter.sidebar||u.sidebar||s.sidebar))return g(t);const o=u.sidebar||s.sidebar;if(o){const{base:e,config:r}=function(t,n){if(Array.isArray(n))return{base:"/",config:n};for(const r in n)if(0===(e=t,/(\.html|\/)$/.test(e)?e:e+"/").indexOf(encodeURI(r)))return{base:r,config:n[r]};var e;return{}}(n,o);return"auto"===r?g(t):r?r.map(t=>(function t(n,e,r,i=1){if("string"==typeof n)return p(e,n,r);if(Array.isArray(n))return Object.assign(p(e,n[0],r),{title:n[1]});{const s=n.children||[];return 0===s.length&&n.path?Object.assign(p(e,n.path,r),{title:n.title}):{type:"group",path:n.path,title:n.title,sidebarDepth:n.sidebarDepth,initialOpenGroupIndex:n.initialOpenGroupIndex,children:s.map(n=>t(n,e,r,i+1)),collapsable:!1!==n.collapsable}}})(t,i,e)):[]}return[]}function g(t){const n=b(t.headers||[]);return[{type:"group",collapsable:!1,title:t.title,path:null,children:n.map(n=>({type:"auto",title:n.title,basePath:t.path,path:t.path+"#"+n.slug,children:n.children||[]}))}]}function b(t){let n;return(t=t.map(t=>Object.assign({},t))).forEach(t=>{2===t.level?n=t:n&&(n.children||(n.children=[])).push(t)}),t.filter(t=>2===t.level)}function m(t){return Object.assign(t,{type:t.items&&t.items.length?"links":"link"})}},273:function(t,n,e){"use strict";e.r(n);e(17),e(117);var r=e(271),i={name:"NavLink",props:{item:{required:!0}},computed:{link(){return Object(r.b)(this.item.link)},exact(){return this.$site.locales?Object.keys(this.$site.locales).some(t=>t===this.link):"/"===this.link},isNonHttpURI(){return Object(r.g)(this.link)||Object(r.h)(this.link)},isBlankTarget(){return"_blank"===this.target},isInternal(){return!Object(r.f)(this.link)&&!this.isBlankTarget},target(){return this.isNonHttpURI?null:this.item.target?this.item.target:Object(r.f)(this.link)?"_blank":""},rel(){return this.isNonHttpURI?null:!1===this.item.rel?null:this.item.rel?this.item.rel:this.isBlankTarget?"noopener 
noreferrer":null}},methods:{focusoutAction(){this.$emit("focusout")}}},s=e(26),u=Object(s.a)(i,function(){var t=this,n=t._self._c;return t.isInternal?n("RouterLink",{staticClass:"nav-link",attrs:{to:t.link,exact:t.exact},nativeOn:{focusout:function(n){return t.focusoutAction.apply(null,arguments)}}},[t._v("\n "+t._s(t.item.text)+"\n")]):n("a",{staticClass:"nav-link external",attrs:{href:t.link,target:t.target,rel:t.rel},on:{focusout:t.focusoutAction}},[t._v("\n "+t._s(t.item.text)+"\n "),t.isBlankTarget?n("OutboundLink"):t._e()],1)},[],!1,null,null,null);n.default=u.exports}}]);

1
assets/js/22.9ec5f7d7.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/3.bff69ddf.js Normal file

File diff suppressed because one or more lines are too long

1
assets/js/4.9ac45811.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[4],{309:function(t,e,n){},322:function(t,e,n){"use strict";n(309)},333:function(t,e,n){"use strict";n.r(e);var i={functional:!0,props:{type:{type:String,default:"tip"},text:String,vertical:{type:String,default:"top"}},render:(t,{props:e,slots:n})=>t("span",{class:["badge",e.type],style:{verticalAlign:e.vertical}},e.text||n().default)},p=(n(322),n(26)),l=Object(p.a)(i,void 0,void 0,!1,null,"15b7b770",null);e.default=l.exports}}]);

1
assets/js/5.fd30eb93.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[5],{310:function(t,e,a){},323:function(t,e,a){"use strict";a(310)},329:function(t,e,a){"use strict";a.r(e);var s={name:"CodeBlock",props:{title:{type:String,required:!0},active:{type:Boolean,default:!1}},mounted(){this.$parent&&this.$parent.loadTabs&&this.$parent.loadTabs()}},i=(a(323),a(26)),n=Object(i.a)(s,function(){return(0,this._self._c)("div",{staticClass:"theme-code-block",class:{"theme-code-block__active":this.active}},[this._t("default")],2)},[],!1,null,"759a7d02",null);e.default=n.exports}}]);

1
assets/js/6.4f9d90d2.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[6],{311:function(e,t,a){},324:function(e,t,a){"use strict";a(311)},330:function(e,t,a){"use strict";a.r(t);a(17),a(27),a(62),a(42);var o={name:"CodeGroup",data:()=>({codeTabs:[],activeCodeTabIndex:-1}),watch:{activeCodeTabIndex(e){this.activateCodeTab(e)}},mounted(){this.loadTabs()},methods:{changeCodeTab(e){this.activeCodeTabIndex=e},loadTabs(){this.codeTabs=(this.$slots.default||[]).filter(e=>Boolean(e.componentOptions)).map((e,t)=>(""===e.componentOptions.propsData.active&&(this.activeCodeTabIndex=t),{title:e.componentOptions.propsData.title,elm:e.elm})),-1===this.activeCodeTabIndex&&this.codeTabs.length>0&&(this.activeCodeTabIndex=0),this.activateCodeTab(0)},activateCodeTab(e){this.codeTabs.forEach(e=>{e.elm&&e.elm.classList.remove("theme-code-block__active")}),this.codeTabs[e].elm&&this.codeTabs[e].elm.classList.add("theme-code-block__active")}}},s=(a(324),a(26)),c=Object(s.a)(o,function(){var e=this,t=e._self._c;return t("ClientOnly",[t("div",{staticClass:"theme-code-group"},[t("div",{staticClass:"theme-code-group__nav"},[t("ul",{staticClass:"theme-code-group__ul"},e._l(e.codeTabs,function(a,o){return t("li",{key:a.title,staticClass:"theme-code-group__li"},[t("button",{staticClass:"theme-code-group__nav-tab",class:{"theme-code-group__nav-tab-active":o===e.activeCodeTabIndex},on:{click:function(t){return e.changeCodeTab(o)}}},[e._v("\n "+e._s(a.title)+"\n ")])])}),0)]),e._v(" "),e._t("default"),e._v(" "),e.codeTabs.length<1?t("pre",{staticClass:"pre-blank"},[e._v("// Make sure to add code blocks to your code group")]):e._e()],2)])},[],!1,null,"deefee04",null);t.default=c.exports}}]);

1
assets/js/7.e62ef287.js Normal file

@@ -0,0 +1 @@
(window.webpackJsonp=window.webpackJsonp||[]).push([[7],{331:function(t,e,s){"use strict";s.r(e);const o=["There's nothing here.","How did we get here?","That's a Four-Oh-Four.","Looks like we've got some broken links."];var n={methods:{getMsg:()=>o[Math.floor(Math.random()*o.length)]}},h=s(26),i=Object(h.a)(n,function(){var t=this._self._c;return t("div",{staticClass:"theme-container"},[t("div",{staticClass:"theme-default-content"},[t("h1",[this._v("404")]),this._v(" "),t("blockquote",[this._v(this._s(this.getMsg()))]),this._v(" "),t("RouterLink",{attrs:{to:"/"}},[this._v("\n Take me home.\n ")])],1)])},[],!1,null,null,null);e.default=i.exports}}]);

10
assets/js/app.a29d2723.js Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@@ -1,19 +0,0 @@
apiVersion: v1
name: flagger
version: 0.4.0
appVersion: 0.4.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- gitops

View File

@@ -1,84 +0,0 @@
# Flagger
[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP request success rate, average request duration and pod health.
Based on the KPI analysis, a canary is promoted or aborted, and the analysis result is published to Slack.
## Prerequisites
* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger`:
```console
$ helm install --name flagger --namespace istio-system flagger/flagger
```
The command deploys Flagger on the Kubernetes cluster in the istio-system namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `flagger` deployment:
```console
$ helm delete --purge flagger
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Flagger chart and their default values.
Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`crd.create` | if `true`, create Flagger's CRDs | `true`
`resources.requests/cpu` | pod CPU request | `10m`
`resources.requests/memory` | pod memory request | `32Mi`
`resources.limits/cpu` | pod CPU limit | `1000m`
`resources.limits/memory` | pod memory limit | `512Mi`
`affinity` | node/pod affinities | None
`nodeSelector` | node labels for pod assignment | `{}`
`tolerations` | list of node taints to tolerate | `[]`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
-f values.yaml
```
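For instance, a minimal `values.yaml` for the example above might contain just the Slack overrides (an illustrative sketch, not a file shipped with the chart):
```yaml
# values.yaml (sketch): override only the Slack settings,
# all other parameters keep the defaults from the table above
slack:
  url: https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
  channel: general
  user: flagger
```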
> **Tip**: You can use the default [values.yaml](values.yaml)

View File

@@ -1 +0,0 @@
Flagger installed

View File

@@ -1,42 +0,0 @@
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "flagger.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "flagger.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "flagger.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "flagger.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "flagger.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
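As a quick illustration, these helpers resolve as follows for a few hypothetical inputs:
```text
release "flagger", chart "flagger"    -> flagger.fullname = "flagger"
release "staging", chart "flagger"    -> flagger.fullname = "staging-flagger"
fullnameOverride: "my-flagger"        -> flagger.fullname = "my-flagger"
```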

View File

@@ -1,11 +0,0 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "flagger.serviceAccountName" . }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -1,117 +0,0 @@
{{- if .Values.crd.create }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
spec:
group: flagger.app
version: v1alpha3
versions:
- name: v1alpha3
served: true
storage: true
- name: v1alpha2
served: true
storage: false
- name: v1alpha1
served: true
storage: false
names:
plural: canaries
singular: canary
kind: Canary
categories:
- all
scope: Namespaced
subresources:
status: {}
additionalPrinterColumns:
- name: Status
type: string
JSONPath: .status.phase
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime
validation:
openAPIV3Schema:
properties:
spec:
required:
- targetRef
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
targetRef:
type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
autoscalerRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
canaryAnalysis:
properties:
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
maxWeight:
type: number
stepWeight:
type: number
metrics:
type: array
properties:
items:
type: object
required: ['name', 'interval', 'threshold']
properties:
name:
type: string
interval:
type: string
pattern: "^[0-9]+(m|s)"
threshold:
type: number
webhooks:
type: array
properties:
items:
type: object
required: ['name', 'url', 'timeout']
properties:
name:
type: string
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
{{- end }}

View File

@@ -1,77 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "flagger.fullname" . }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "flagger.serviceAccountName" . }}
containers:
- name: flagger
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
command:
- ./flagger
- -log-level=info
- -metrics-server={{ .Values.metricsServer }}
{{- if .Values.slack.url }}
- -slack-url={{ .Values.slack.url }}
- -slack-user={{ .Values.slack.user }}
- -slack-channel={{ .Values.slack.channel }}
{{- end }}
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
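With the chart's default values, the `toYaml .Values.resources | indent 12` block above would render roughly as this container fragment (a sketch):
```yaml
# rendered container resources (sketch, using the chart defaults)
resources:
  limits:
    cpu: 1000m
    memory: 512Mi
  requests:
    cpu: 10m
    memory: 32Mi
```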

View File

@@ -1,35 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "flagger.fullname" . }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: ['*']
resources: ['*']
verbs: ['*']
- nonResourceURLs: ['*']
verbs: ['*']
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "flagger.fullname" . }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "flagger.fullname" . }}
subjects:
- name: {{ template "flagger.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
kind: ServiceAccount
{{- end }}

View File

@@ -1,45 +0,0 @@
# Default values for flagger.
image:
repository: quay.io/stefanprodan/flagger
tag: 0.4.0
pullPolicy: IfNotPresent
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
slack:
user: flagger
channel:
# incoming webhook https://api.slack.com/incoming-webhooks
url:
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
crd:
# crd.create: `true` if custom resource definitions should be created
create: true
nameOverride: ""
fullnameOverride: ""
resources:
limits:
memory: "512Mi"
cpu: "1000m"
requests:
memory: "32Mi"
cpu: "10m"
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -1,13 +0,0 @@
apiVersion: v1
name: grafana
version: 0.1.0
appVersion: 5.4.2
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
email: stefanprodan@users.noreply.github.com

View File

@@ -1,79 +0,0 @@
# Flagger Grafana
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites
* Kubernetes >= 1.9
* Istio >= 1.0
* Prometheus >= 2.6
## Installing the Chart
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
```
To install the chart with the release name `flagger-grafana`:
```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The command deploys Grafana on the Kubernetes cluster in the istio-system namespace.
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
To uninstall/delete the `flagger-grafana` deployment:
```console
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Grafana chart and their default values.
Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `grafana/grafana`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.tag` | Image tag | `<VERSION>`
`replicaCount` | desired number of pods | `1`
`resources` | pod resources | `none`
`tolerations` | List of node taints to tolerate | `[]`
`affinity` | node/pod affinities | `none`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `admin`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install flagger/grafana --name flagger-grafana \
--set token=WEAVE-CLOUD-TOKEN
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install flagger/grafana --name flagger-grafana -f values.yaml
```
> **Tip**: You can use the default [values.yaml](values.yaml)

File diff suppressed because it is too large Load Diff

View File

@@ -1,15 +0,0 @@
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}

View File

@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "grafana.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "grafana.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "grafana.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@@ -1,6 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}-dashboards
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}

View File

@@ -1,32 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}-datasources
data:
datasources.yaml: |-
apiVersion: 1
deleteDatasources:
- name: prometheus
{{- if .Values.token }}
datasources:
- name: prometheus
type: prometheus
access: proxy
url: https://cloud.weave.works/api/prom
isDefault: true
editable: true
version: 1
basicAuth: true
basicAuthUser: weave
basicAuthPassword: {{ .Values.token }}
{{- else }}
datasources:
- name: prometheus
type: prometheus
access: proxy
url: {{ .Values.url }}
isDefault: true
editable: true
version: 1
{{- end }}

View File

@@ -1,81 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.fullname" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "grafana.fullname" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "grafana.fullname" . }}
release: {{ .Release.Name }}
annotations:
prometheus.io/scrape: 'false'
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 3000
protocol: TCP
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
env:
- name: GF_SECURITY_ADMIN_USER
value: {{ .Values.user }}
- name: GF_SECURITY_ADMIN_PASSWORD
value: {{ .Values.password }}
- name: GF_PATHS_PROVISIONING
value: /etc/grafana/provisioning/
volumeMounts:
- name: grafana
mountPath: /var/lib/grafana
- name: dashboards
mountPath: /etc/grafana/dashboards
- name: datasources
mountPath: /etc/grafana/provisioning/datasources
- name: providers
mountPath: /etc/grafana/provisioning/dashboards
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: grafana
emptyDir: {}
- name: dashboards
configMap:
name: {{ template "grafana.fullname" . }}-dashboards
- name: providers
configMap:
name: {{ template "grafana.fullname" . }}-providers
- name: datasources
configMap:
name: {{ template "grafana.fullname" . }}-datasources

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "grafana.fullname" . }}-providers
data:
providers.yaml: |+
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /etc/grafana/dashboards

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "grafana.fullname" . }}
labels:
app: {{ template "grafana.name" . }}
chart: {{ template "grafana.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app: {{ template "grafana.fullname" . }}
release: {{ .Release.Name }}

View File

@@ -1,37 +0,0 @@
# Default values for grafana.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: grafana/grafana
tag: 5.4.2
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
user: admin
password: admin
# Istio Prometheus instance
url: http://prometheus:9090
# Weave Cloud instance token
token:

View File

@@ -1,141 +0,0 @@
package main
import (
"flag"
"log"
"time"
_ "github.com/istio/glog"
istioclientset "github.com/knative/pkg/client/clientset/versioned"
"github.com/knative/pkg/signals"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/controller"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/stefanprodan/flagger/pkg/notifier"
"github.com/stefanprodan/flagger/pkg/server"
"github.com/stefanprodan/flagger/pkg/version"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
var (
masterURL string
kubeconfig string
metricsServer string
controlLoopInterval time.Duration
logLevel string
port string
slackURL string
slackUser string
slackChannel string
)
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "8080", "Port to listen on.")
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
}
func main() {
flag.Parse()
logger, err := logging.NewLogger(logLevel)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
logger.Fatalf("Error building kubeconfig: %v", err)
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building kubernetes clientset: %v", err)
}
istioClient, err := istioclientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building istio clientset: %v", err)
}
flaggerClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building example clientset: %s", err.Error())
}
flaggerInformerFactory := informers.NewSharedInformerFactory(flaggerClient, time.Second*30)
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
ver, err := kubeClient.Discovery().ServerVersion()
if err != nil {
logger.Fatalf("Error calling Kubernetes API: %v", err)
}
logger.Infof("Connected to Kubernetes API %s", ver)
ok, err := controller.CheckMetricsServer(metricsServer)
if ok {
logger.Infof("Connected to metrics server %s", metricsServer)
} else {
logger.Errorf("Metrics server %s unreachable %v", metricsServer, err)
}
var slack *notifier.Slack
if slackURL != "" {
slack, err = notifier.NewSlack(slackURL, slackUser, slackChannel)
if err != nil {
logger.Errorf("Notifier %v", err)
} else {
logger.Infof("Slack notifications enabled for channel %s", slack.Channel)
}
}
// start HTTP server
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
c := controller.NewController(
kubeClient,
istioClient,
flaggerClient,
canaryInformer,
controlLoopInterval,
metricsServer,
logger,
slack,
)
flaggerInformerFactory.Start(stopCh)
logger.Info("Waiting for informer caches to sync")
for _, synced := range []cache.InformerSynced{
canaryInformer.Informer().HasSynced,
} {
if ok := cache.WaitForCacheSync(stopCh, synced); !ok {
logger.Fatalf("Failed to wait for cache sync")
}
}
// start controller
go func(ctrl *controller.Controller) {
if err := ctrl.Run(2, stopCh); err != nil {
logger.Fatalf("Error running controller: %v", err)
}
}(c)
<-stopCh
}
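For reference, the flags registered in `init()` allow running the controller out-of-cluster; a hypothetical local invocation (the kubeconfig path and Prometheus URL are illustrative):
```bash
# hypothetical out-of-cluster run; -kubeconfig is only needed outside the cluster
./flagger -kubeconfig=$HOME/.kube/config \
  -metrics-server=http://localhost:9090 \
  -log-level=debug \
  -port=8080
```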

BIN
cncf.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

View File

@@ -1,73 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting stefan.prodan(at)gmail.com.
All complaints will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of
an incident. Further details of specific enforcement policies may be
posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

View File

@@ -1,11 +0,0 @@
# Flagger
Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic
shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP request success rate, average request duration and pod health. Based on the KPI analysis,
a canary is promoted or aborted, and the analysis result is published to Slack.
### For the install instructions and usage examples please see [docs.flagger.app](https://docs.flagger.app)

View File

@@ -1,55 +0,0 @@
title: Flagger - Istio Progressive Delivery Kubernetes Operator
remote_theme: errordeveloper/simple-project-homepage
repository: stefanprodan/flagger
by_weaveworks: true
url: "https://flagger.app"
baseurl: "/"
twitter:
username: "stefanprodan"
author:
twitter: "stefanprodan"
# Set default og:image
defaults:
- scope: {path: ""}
values: {image: "diagrams/flagger-overview.png"}
# See: https://material.io/guidelines/style/color.html
# Use color-name-value, like pink-200 or deep-purple-100
brand_color: "amber-400"
# How article URLs are structured.
# See: https://jekyllrb.com/docs/permalinks/
permalink: posts/:title/
# "UA-NNNNNNNN-N"
google_analytics: ""
# Language. For example, if you write in Japanese, use "ja"
lang: "en"
# How many posts are visible on the home page without clicking "View More"
num_posts_visible_initially: 5
# Date format: See http://strftime.net/
date_format: "%b %-d, %Y"
plugins:
- jekyll-feed
- jekyll-readme-index
- jekyll-seo-tag
- jekyll-sitemap
- jemoji
# # required for local builds with starefossen/github-pages
# - jekyll-github-metadata
# - jekyll-mentions
# - jekyll-redirect-from
# - jekyll-remote-theme
exclude:
- CNAME
- gitbook

Binary file not shown.

Before

Width:  |  Height:  |  Size: 220 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 207 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 210 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 183 KiB

View File

@@ -1,23 +0,0 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
---
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running integration tests,
load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP request success rate, average request duration and pod health.
Based on the **KPI** analysis, a canary is promoted or aborted, and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources \(canaries.flagger.app kind\) and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)

View File

@@ -1,17 +0,0 @@
# Table of contents
* [Introduction](README.md)
* [How it works](how-it-works.md)
## Install
* [Install Flagger](install/install-flagger.md)
* [Install Grafana](install/install-grafana.md)
* [Install Istio](install/install-istio.md)
## Usage
* [Canary Deployments](usage/progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

View File

@@ -1,284 +0,0 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\) and creates a series of objects \(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- podinfo.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 1m
# key-value pairs (optional)
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
```
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
spec:
selector:
matchLabels:
app: podinfo
template:
metadata:
labels:
app: podinfo
```
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
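For the Canary above, `service.port: 9898` implies that the podinfo deployment exposes a matching container port; a minimal sketch (the container name and image are illustrative):
```yaml
# sketch: the container port must match the Canary service.port value
containers:
  - name: podinfod
    image: quay.io/stefanprodan/podinfo:1.2.1
    ports:
      - name: http
        containerPort: 9898
```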
### Canary Deployment
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
Gated canary promotion stages:
* scan for canary deployments
* create the primary deployment if needed
* check that the Istio virtual service routes are mapped to the primary and canary ClusterIP services
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* increase canary traffic weight percentage from 0% to 5% \(step weight\)
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* wait for the canary deployment to be updated \(revision bump\) and start over
* increase canary traffic weight by 5% \(step weight\) till it reaches 50% \(max weight\)
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* promote canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark the canary deployment as finished
* wait for the canary deployment to be updated \(revision bump\) and start over
You can change the canary analysis _max weight_ and the _step weight_ percentage in Flagger's custom resource.
### Canary Analysis
The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
Spec:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 2
```
The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
interval * (maxWeight / stepWeight)
```
And the time it takes for a canary to be rolled back when the metric or webhook checks are failing:
```
interval * threshold
```
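Plugging in the spec above (interval 1m, maxWeight 50, stepWeight 2, threshold 10):
```
promotion: 1m * (50 / 2) = 25 minutes
rollback:  1m * 10       = 10 minutes
```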
### HTTP Metrics
The canary analysis is using the following Prometheus queries:
**HTTP requests success rate percentage**
Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
```
Query:
```javascript
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
istio_requests_total{
reporter="destination",
destination_workload_namespace=~"$namespace",
destination_workload=~"$workload"
}[$interval]
)
)
```
**HTTP requests milliseconds duration P99**
Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
```
Query:
```javascript
histogram_quantile(0.99,
sum(
irate(
istio_request_duration_seconds_bucket{
reporter="destination",
destination_workload=~"$workload",
destination_workload_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
> **Note** that the metric interval should be lower than or equal to the control loop interval.
### Webhooks
The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and use the response status code (HTTP 2xx) to determine if the canary is passing or failing.
Spec:
```yaml
canaryAnalysis:
webhooks:
- name: integration-tests
url: http://podinfo.test:9898/echo
timeout: 30s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
- name: load-tests
url: http://podinfo.test:9898/echo
timeout: 30s
metadata:
key1: "val1"
key2: "val2"
```
> **Note** that the sum of all webhook timeouts should be lower than the control loop interval.
Webhook payload (HTTP POST):
```json
{
"name": "podinfo",
"namespace": "test",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
}
}
```
Response status codes:
* 200-202 - advance canary by increasing the traffic weight
* timeout or non-2xx - halt advancement and increment failed checks
On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.
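You can exercise a webhook by hand with the same payload Flagger sends; a hypothetical check against the integration-tests hook above:
```bash
# hypothetical manual call mimicking Flagger's webhook POST;
# a 2xx status code means the check passes
curl -s -o /dev/null -w "%{http_code}\n" -X POST \
  -d '{"name": "podinfo", "namespace": "test", "metadata": {"test": "all", "token": "16688eb5e9f289f1991c"}}' \
  http://podinfo.test:9898/echo
```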

View File

@@ -1,75 +0,0 @@
# Install Flagger
Before installing Flagger make sure you have [Istio](https://istio.io) running with Prometheus enabled.
If you are new to Istio you can follow this GKE guide
[Istio service mesh walk-through](https://docs.flagger.app/install/install-istio).
**Prerequisites**
* Kubernetes &gt;= 1.11
* Istio &gt;= 1.0
* Prometheus &gt;= 2.6
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
Enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Install with kubectl
If you don't have Tiller you can use the helm template command and apply the generated YAML with kubectl:
```bash
# generate
helm template flagger/flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set controlLoopInterval=1m > $HOME/flagger.yaml
# apply
kubectl apply -f $HOME/flagger.yaml
```
### Uninstall
To uninstall/delete the flagger release with Helm run:
```text
helm delete --purge flagger
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
> **Note** that on uninstall the Canary CRD will not be removed.
Deleting the CRD will make Kubernetes remove all the objects owned by Flagger like Istio virtual services,
Kubernetes deployments and ClusterIP services.
If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:
```text
kubectl delete crd canaries.flagger.app
```

View File

@@ -1,48 +0,0 @@
# Install Grafana
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
### Install with Helm and Tiller
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
### Install with kubectl
If you don't have Tiller you can use the helm template command and apply the generated YAML with kubectl:
```bash
# generate
helm template flagger/grafana \
--name flagger-grafana \
--namespace=istio-system \
--set user=admin \
--set password=admin > $HOME/flagger-grafana.yaml
# apply
kubectl apply -f $HOME/flagger-grafana.yaml
```
### Uninstall
To uninstall/delete the Grafana release with Helm run:
```text
helm delete --purge flagger-grafana
```
The command removes all the Kubernetes components associated with the chart and deletes the release.

View File

@@ -1,460 +0,0 @@
# Install Istio
This guide walks you through setting up Istio with Jaeger, Prometheus, Grafana and
Let's Encrypt TLS for the ingress gateway on Google Kubernetes Engine.
![Istio GKE diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-gcp-overview.png)
### Prerequisites
You will be creating a cluster on Google's Kubernetes Engine \(GKE\);
if you don't have an account you can sign up [here](https://cloud.google.com/free/) for free credits.
Log in to GCP, create a project and enable billing for it.
Install the [gcloud](https://cloud.google.com/sdk/) command line utility and configure your project with `gcloud init`.
Set the default project \(replace `PROJECT_ID` with your own project\):
```text
gcloud config set project PROJECT_ID
```
Set the default compute region and zone:
```text
gcloud config set compute/region europe-west3
gcloud config set compute/zone europe-west3-a
```
Enable the Kubernetes and Cloud DNS services for your project:
```text
gcloud services enable container.googleapis.com
gcloud services enable dns.googleapis.com
```
Install the `kubectl` command-line tool:
```text
gcloud components install kubectl
```
Install the `helm` command-line tool:
```text
brew install kubernetes-helm
```
### GKE cluster setup
Create a cluster with three nodes using the latest Kubernetes version:
```bash
k8s_version=$(gcloud container get-server-config --format=json \
| jq -r '.validNodeVersions[0]')
gcloud container clusters create istio \
--cluster-version=${k8s_version} \
--zone=europe-west3-a \
--num-nodes=3 \
--machine-type=n1-highcpu-4 \
--preemptible \
--no-enable-cloud-logging \
--disk-size=30 \
--enable-autorepair \
--scopes=gke-default,compute-rw,storage-rw
```
The above command will create a default node pool consisting of `n1-highcpu-4` \(vCPU: 4, RAM 3.60GB, DISK: 30GB\)
preemptible VMs. Preemptible VMs are up to 80% cheaper than regular instances and are terminated and replaced
after a maximum of 24 hours.
Set up credentials for `kubectl`:
```bash
gcloud container clusters get-credentials istio -z=europe-west3-a
```
Create a cluster admin role binding:
```bash
kubectl create clusterrolebinding "cluster-admin-$(whoami)" \
--clusterrole=cluster-admin \
--user="$(gcloud config get-value core/account)"
```
Validate your setup with:
```bash
kubectl get nodes -o wide
```
### Cloud DNS setup
You will need an internet domain and access to the registrar to change the name servers to Google Cloud DNS.
Create a managed zone named `istio` in Cloud DNS \(replace `example.com` with your domain\):
```bash
gcloud dns managed-zones create \
--dns-name="example.com." \
--description="Istio zone" "istio"
```
Look up your zone's name servers:
```bash
gcloud dns managed-zones describe istio
```
Update your registrar's name server records with the records returned by the above command.
Wait for the name servers to change \(replace `example.com` with your domain\):
```bash
watch dig +short NS example.com
```
Create a static IP address named `istio-gateway-ip` in the same region as your GKE cluster:
```bash
gcloud compute addresses create istio-gateway-ip --region europe-west3
```
Find the static IP address:
```bash
gcloud compute addresses describe istio-gateway-ip --region europe-west3
```
Create the following DNS records \(replace `example.com` with your domain and set your Istio Gateway IP\):
```bash
DOMAIN="example.com"
GATEWAYIP="35.198.98.90"
gcloud dns record-sets transaction start --zone=istio
gcloud dns record-sets transaction add --zone=istio \
--name="${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="www.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction add --zone=istio \
--name="*.${DOMAIN}" --ttl=300 --type=A ${GATEWAYIP}
gcloud dns record-sets transaction execute --zone istio
```
Verify that the wildcard DNS is working \(replace `example.com` with your domain\):
```bash
watch host test.example.com
```
### Install Istio with Helm
Download the latest Istio release:
```bash
curl -L https://git.io/getLatestIstio | sh -
```
Navigate to the `istio-x.x.x` dir and copy the Istio CLI into your bin:
```bash
cd istio-x.x.x/
sudo cp ./bin/istioctl /usr/local/bin/istioctl
```
Apply the Istio CRDs:
```bash
kubectl apply -f ./install/kubernetes/helm/istio/templates/crds.yaml
```
Create a service account and a cluster role binding for Tiller:
```bash
kubectl apply -f ./install/kubernetes/helm/helm-service-account.yaml
```
Deploy Tiller in the `kube-system` namespace:
```bash
helm init --service-account tiller
```
Find the GKE IP ranges:
```bash
gcloud container clusters describe istio --zone=europe-west3-a \
| grep -e clusterIpv4Cidr -e servicesIpv4Cidr
```
You'll be using the IP ranges to allow unrestricted egress traffic for services running inside the service mesh.
Configure Istio with Prometheus, Jaeger, and cert-manager:
```yaml
global:
nodePort: false
proxy:
# replace with your GKE IP ranges
includeIPRanges: "10.28.0.0/14,10.7.240.0/20"
sidecarInjectorWebhook:
enabled: true
enableNamespacesByDefault: false
gateways:
enabled: true
istio-ingressgateway:
replicaCount: 2
autoscaleMin: 2
autoscaleMax: 3
# replace with your Istio Gateway IP
loadBalancerIP: "35.198.98.90"
type: LoadBalancer
pilot:
enabled: true
replicaCount: 1
autoscaleMin: 1
autoscaleMax: 1
resources:
requests:
cpu: 500m
memory: 1024Mi
grafana:
enabled: true
security:
enabled: true
adminUser: admin
# change the password
adminPassword: admin
prometheus:
enabled: true
servicegraph:
enabled: true
tracing:
enabled: true
jaeger:
tag: 1.7
certmanager:
enabled: true
```
Save the above file as `my-istio.yaml` and install Istio with Helm:
```bash
helm upgrade --install istio ./install/kubernetes/helm/istio \
--namespace=istio-system \
-f ./my-istio.yaml
```
Verify that Istio workloads are running:
```text
kubectl -n istio-system get pods
```
### Configure Istio Gateway with LE TLS
![Istio Let&apos;s Encrypt diagram](https://raw.githubusercontent.com/stefanprodan/istio-gke/master/docs/screens/istio-cert-manager-gcp.png)
Create an Istio Gateway in the istio-system namespace with HTTPS redirect:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
tls:
httpsRedirect: true
- port:
number: 443
name: https
protocol: HTTPS
hosts:
- "*"
tls:
mode: SIMPLE
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
```
Save the above resource as istio-gateway.yaml and then apply it:
```text
kubectl apply -f ./istio-gateway.yaml
```
Create a service account with Cloud DNS admin role \(replace `my-gcp-project` with your project ID\):
```bash
GCP_PROJECT=my-gcp-project
gcloud iam service-accounts create dns-admin \
--display-name=dns-admin \
--project=${GCP_PROJECT}
gcloud iam service-accounts keys create ./gcp-dns-admin.json \
--iam-account=dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--project=${GCP_PROJECT}
gcloud projects add-iam-policy-binding ${GCP_PROJECT} \
--member=serviceAccount:dns-admin@${GCP_PROJECT}.iam.gserviceaccount.com \
--role=roles/dns.admin
```
Create a Kubernetes secret with the GCP Cloud DNS admin key:
```bash
kubectl create secret generic cert-manager-credentials \
--from-file=./gcp-dns-admin.json \
--namespace=istio-system
```
Create a letsencrypt issuer for CloudDNS \(replace `email@example.com` with a valid email address and
`my-gcp-project` with your project ID\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: letsencrypt-prod
namespace: istio-system
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: email@example.com
privateKeySecretRef:
name: letsencrypt-prod
dns01:
providers:
- name: cloud-dns
clouddns:
serviceAccountSecretRef:
name: cert-manager-credentials
key: gcp-dns-admin.json
project: my-gcp-project
```
Save the above resource as letsencrypt-issuer.yaml and then apply it:
```text
kubectl apply -f ./letsencrypt-issuer.yaml
```
Create a wildcard certificate \(replace `example.com` with your domain\):
```yaml
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: istio-gateway
namespace: istio-system
spec:
  secretName: istio-ingressgateway-certs
issuerRef:
name: letsencrypt-prod
commonName: "*.example.com"
acme:
config:
- dns01:
provider: cloud-dns
domains:
- "*.example.com"
- "example.com"
```
Save the above resource as of-cert.yaml and then apply it:
```text
kubectl apply -f ./of-cert.yaml
```
In a couple of seconds cert-manager should fetch a wildcard certificate from letsencrypt.org:
```text
kubectl -n istio-system logs deployment/certmanager -f
Certificate issued successfully
Certificate istio-system/istio-gateway scheduled for renewal in 1438 hours
```
Recreate Istio ingress gateway pods:
```bash
kubectl -n istio-system delete pods -l istio=ingressgateway
```
Note that the Istio gateway doesn't reload the certificates from the TLS secret on cert-manager renewal.
Since the GKE cluster is made out of preemptible VMs, the gateway pods will be replaced once every 24h.
If you're not using preemptible nodes, you need to manually kill the gateway pods every two months
before the certificate expires.
### Expose services outside the service mesh
In order to expose services via the Istio Gateway you have to create a Virtual Service attached to the gateway.
Create a virtual service in `istio-system` namespace for Grafana \(replace `example.com` with your domain\):
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: grafana
namespace: istio-system
spec:
hosts:
- "grafana.example.com"
gateways:
- public-gateway.istio-system.svc.cluster.local
http:
- route:
- destination:
host: grafana
timeout: 30s
```
Save the above resource as grafana-virtual-service.yaml and then apply it:
```bash
kubectl apply -f ./grafana-virtual-service.yaml
```
Navigate to `http://grafana.example.com` in your browser and you should be redirected to the HTTPS version.
Check that HTTP/2 is enabled:
```bash
curl -I --http2 https://grafana.example.com
HTTP/2 200
content-type: text/html; charset=UTF-8
x-envoy-upstream-service-time: 3
server: envoy
```

View File

@@ -1,39 +0,0 @@
# Alerting
### Slack
Flagger can be configured to send Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected, and when the canary analysis fails or succeeds.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline is exceeded or if the analysis reaches the
maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
### Prometheus Alert Manager
Besides Slack, you can use Alertmanager to trigger alerts when a canary deployment fails:
```yaml
- alert: canary_rollback
expr: flagger_canary_status > 1
for: 1m
labels:
severity: warning
annotations:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```
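The `flagger_canary_status` gauge reports 0 for running, 1 for successful and 2 for failed (see the Monitoring section), so the alert above fires only for failed canaries; the same condition can be checked ad hoc with:
```
# PromQL: canaries whose last analysis failed
flagger_canary_status == 2
```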

View File

@@ -1,69 +0,0 @@
# Monitoring
### Grafana
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
### Logging
Canary errors and latency spikes are recorded as Kubernetes events and logged by Flagger in JSON format:
```text
kubectl -n istio-system logs deployment/flagger --tail=100 | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Halt podinfo.test advancement success rate 98.69% < 99%
Advance podinfo.test canary weight 40
Halt podinfo.test advancement request duration 1.515s > 500ms
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Halt podinfo-primary.test advancement waiting for rollout to finish: 1 old replicas are pending termination
Scaling down podinfo.test
Promotion completed! podinfo.test
```
### Metrics
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and
the destination weight values:
```bash
# Canaries total gauge
flagger_canary_total{namespace="test"} 1
# Canary promotion last known status gauge
# 0 - running, 1 - successful, 2 - failed
flagger_canary_status{name="podinfo",namespace="test"} 1
# Canary traffic weight gauge
flagger_canary_weight{workload="podinfo-primary",namespace="test"} 95
flagger_canary_weight{workload="podinfo",namespace="test"} 5
# Seconds spent performing canary analysis histogram
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="10"} 6
flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"} 6
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```
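From the histogram above you can derive, for example, the average analysis duration per canary (a sketch using the metric names shown):
```
# PromQL: average canary analysis duration in seconds
flagger_canary_duration_seconds_sum / flagger_canary_duration_seconds_count
```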

View File

@@ -1,194 +0,0 @@
# Canary Deployments
This guide shows you how to use Istio and Flagger to automate canary deployments.
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
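You can list the generated objects to confirm the bootstrap worked (querying the VirtualService assumes the Istio CRDs are installed):
```bash
# Objects Flagger created alongside the target deployment
kubectl -n test get deployments,services,hpa
kubectl -n test get virtualservices
```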
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-01-16T14:05:07Z
prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero, and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
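A failed rollout does not block future releases: Flagger starts a new analysis as soon as it detects another revision, so pushing a fixed image retries the canary (the `1.2.2` tag below is illustrative):
```bash
# Trigger a fresh canary analysis with a corrected image
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.2.2
```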

Binary file not shown.


@@ -1,146 +0,0 @@
apiVersion: v1
entries:
  flagger:
  - apiVersion: v1
    appVersion: 0.4.0
    created: 2019-01-18T12:49:18.099861+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: fe06de1c68c6cc414440ef681cde67ae02c771de9b1e4d2d264c38a7a9c37b3d
    engine: gotpl
    home: https://docs.flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.11.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.4.0.tgz
    version: 0.4.0
  - apiVersion: v1
    appVersion: 0.3.0
    created: 2019-01-18T12:49:18.099501+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 8baa478cc802f4e6b7593934483359b8f70ec34413ca3b8de3a692e347a9bda4
    engine: gotpl
    home: https://docs.flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.9.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.3.0.tgz
    version: 0.3.0
  - apiVersion: v1
    appVersion: 0.2.0
    created: 2019-01-18T12:49:18.099162+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 800b5fd1a0b2854ee8412b3170c36ecda3d382f209e18b475ee1d5e3c7fa2f83
    engine: gotpl
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.9.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.2.0.tgz
    version: 0.2.0
  - apiVersion: v1
    appVersion: 0.1.2
    created: 2019-01-18T12:49:18.098811+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 0029ef8dd20ebead3d84638eaa4b44d60b3e2bd953b4b7a1169963ce93a4e87c
    engine: gotpl
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    keywords:
    - canary
    - istio
    - gitops
    kubeVersion: '>=1.9.0-0'
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: flagger
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.2.tgz
    version: 0.1.2
  - apiVersion: v1
    appVersion: 0.1.1
    created: 2019-01-18T12:49:18.098439+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 2bb8f72fcf63a5ba5ecbaa2ab0d0446f438ec93fbf3a598cd7de45e64d8f9628
    home: https://github.com/stefanprodan/flagger
    name: flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.1.tgz
    version: 0.1.1
  - apiVersion: v1
    appVersion: 0.1.0
    created: 2019-01-18T12:49:18.098153+02:00
    description: Flagger is a Kubernetes operator that automates the promotion of
      canary deployments using Istio routing for traffic shifting and Prometheus metrics
      for canary analysis.
    digest: 03e05634149e13ddfddae6757266d65c271878a026c21c7d1429c16712bf3845
    home: https://github.com/stefanprodan/flagger
    name: flagger
    urls:
    - https://stefanprodan.github.io/flagger/flagger-0.1.0.tgz
    version: 0.1.0
  grafana:
  - apiVersion: v1
    appVersion: 5.4.2
    created: 2019-01-18T12:49:18.100331+02:00
    description: Grafana dashboards for monitoring Flagger canary deployments
    digest: 97257d1742aca506f8703922d67863c459c1b43177870bc6050d453d19a683c0
    home: https://flagger.app
    icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
    maintainers:
    - email: stefanprodan@users.noreply.github.com
      name: stefanprodan
      url: https://github.com/stefanprodan
    name: grafana
    sources:
    - https://github.com/stefanprodan/flagger
    urls:
    - https://stefanprodan.github.io/flagger/grafana-0.1.0.tgz
    version: 0.1.0
generated: 2019-01-18T12:49:18.097682+02:00
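This index is what Helm reads when the repository is added; a minimal usage sketch against the URLs listed above (release name and target namespace are illustrative):
```bash
# Add the Helm repository backed by this index and install the operator
helm repo add flagger https://stefanprodan.github.io/flagger
helm upgrade -i flagger flagger/flagger --namespace istio-system
```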

Binary file not shown.


Some files were not shown because too many files have changed in this diff.