Compare commits

...

271 Commits

Author SHA1 Message Date
stakater-user
2aa514a34c [skip-ci] Update artifacts 2021-07-28 10:36:56 +00:00
Faizan Ahmad
ac39bc4eba Merge pull request #251 from aslafy-z/patch-1
docs(helm): podmonitor does not need service
2021-07-28 12:20:07 +02:00
Zadkiel
284d21686e docs(helm): podmonitor does not need service 2021-07-20 17:42:59 +02:00
stakater-user
00c0c11c76 [skip-ci] Update artifacts 2021-07-11 07:51:39 +00:00
Brandon Clifford
96ebfa8e62 Fix typo in Chart.yaml sources (#248) 2021-07-11 09:37:32 +02:00
stakater-user
95d442d80f [skip-ci] Update artifacts 2021-07-09 07:43:58 +00:00
Faizan Ahmad
e4e58882ab Merge pull request #246 from gciria/add-chart-liveness-readiness
Add Liveness and readiness probe timeout values
2021-07-09 09:29:41 +02:00
Gustavo Ciria
ea71fc0eec Create Chart.yaml 2021-07-08 13:07:43 -03:00
Gustavo Ciria
462b225d92 Delete Chart.yaml
Version and appVersion do not need to be updated manually.
2021-07-08 12:55:03 -03:00
Gustavo Ciria
d8728092f8 Add Liveness and readiness probe timeout values 2021-07-08 11:44:49 -03:00
stakater-user
2c8ef70c43 [skip-ci] Update artifacts 2021-06-28 14:19:33 +00:00
Faizan Ahmad
4d2c8a451e Merge pull request #243 from sfynx/master
Only enable Rollouts when enabled in Helm chart.
2021-06-28 16:03:38 +02:00
Henno Schooljan
f7927c85b1 Disable OpenShift by default, add notes in README. 2021-06-25 21:28:16 +02:00
Henno Schooljan
2e2fd2a11b Only enable Rollouts when enabled in Helm chart.
This prevents a permission issue in case Rollouts is available on a cluster, but the user does not have permission to use it (e.g. as a tenant on a cluster without cluster admin rights), and therefore also may not set permissions for it.

See issue #231.
2021-06-16 20:46:51 +02:00
stakater-user
0e6ec1d36b [skip-ci] Update artifacts 2021-06-15 17:40:07 +00:00
Faizan Ahmad
85b33d9104 Merge pull request #242 from stakater/actions-update
Added helm template step in push action
2021-06-15 19:25:54 +02:00
talha0324
c838ecbbc7 Updated command to one line 2021-06-15 19:52:28 +05:00
stakater-user
068a5c1e64 [skip-ci] Update artifacts 2021-06-15 14:13:35 +00:00
Faizan Ahmad
4d559a1864 Merge pull request #240 from stakater/file-name-fix
Fixed file name
2021-06-15 15:57:30 +02:00
talha0324
322142dd66 syntax fix 2021-06-15 18:24:56 +05:00
talha0324
39f37b706c Added helm template step in push action 2021-06-15 18:23:43 +05:00
talha0324
4e10dd4f80 Merge branch 'master' into file-name-fix
merging master into this branch
2021-06-15 17:58:49 +05:00
stakater-user
ccaa600ff4 [skip-ci] Update artifacts 2021-06-15 11:42:02 +00:00
Faizan Ahmad
a3fcfeb62f Merge pull request #241 from stakater/actions-fix
Fixed helm version tag and PR message
2021-06-15 13:28:07 +02:00
talha0324
d2cbbafeb1 Fixed helm version tag and PR message 2021-06-15 16:07:49 +05:00
talha0324
eaf8e16414 Fixed file name 2021-06-15 15:48:52 +05:00
stakater-user
5a65cf9f6d [skip-ci] Update artifacts 2021-06-13 19:48:18 +00:00
Faizan Ahmad
a8a68ae1b0 Merge pull request #236 from tete17/Update-dependencies-for-Argo-Rollouts
Update ArgoCD Rollouts to 1.0.1 to fix a compatibility issue
2021-06-13 21:33:59 +02:00
tete17
7643a27fb1 Upgrade argo-rollouts to v1.0.1 2021-06-04 18:32:02 +02:00
tete17
71fdb53c2e Update ArgoCD Rollouts to 0.10.2 to fix a compatibility issue and update necessary k8s machinery 2021-05-31 15:35:51 +02:00
stakater-user
d6312f6f83 [skip-ci] Update artifacts 2021-05-25 03:26:20 +00:00
Ahmed Waleed Malik
19220f5e6e Merge pull request #235 from phillebaba/feature/chart-priority-class
Add priority class name to helm chart
2021-05-25 08:13:08 +05:00
Philip Laine
05456b0905 Remove priority class name value 2021-05-24 21:19:20 +02:00
Philip Laine
10328dee8d Update deployments/kubernetes/chart/reloader/templates/deployment.yaml
Co-authored-by: Ahmed Waleed Malik <ahmedwaleedmalik@gmail.com>
2021-05-24 21:18:42 +02:00
Philip Laine
fd174ed691 Add priority class name to helm chart 2021-05-19 22:06:35 +02:00
stakater-user
2e47f1740c [skip-ci] Update artifacts 2021-04-26 04:52:02 +00:00
Ahmed Waleed Malik
15cb96f945 Merge pull request #228 from stakater/fix-issue-221
Add Optional pod monitor
2021-04-26 09:38:29 +05:00
faizanahmad055
1e987db54d Add endline in podmonitor.yaml
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:10:23 +02:00
faizanahmad055
12a7fed3ae Add endline in values.yaml.tmpl
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:09:40 +02:00
stakater-user
f18fac66c2 [skip-ci] Update artifacts 2021-04-25 21:05:39 +00:00
faizanahmad055
b5c95f9cbf Add Optional pod monitor
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:05:36 +02:00
Faizan Ahmad
46b948388f Merge pull request #226 from tenstad/propagate-error-to-fix-retry
Propagate error to enable retry
2021-04-25 22:52:06 +02:00
Amund Tenstad
78be58b090 Do not log content of secrets 2021-04-21 16:12:53 +02:00
Amund Tenstad
54a8e0683b Propagate PerformRollingUpgrade error to Handle 2021-04-21 13:25:08 +02:00
stakater-user
702f0caa93 [skip-ci] Update artifacts 2021-04-11 18:21:36 +00:00
Faizan Ahmad
2e709e85ae Merge pull request #223 from sfynx/master
fix: read isArgoRollouts correctly in Helm chart
2021-04-11 20:08:10 +02:00
Henno Schooljan
debfd57a91 fix: read isArgoRollouts correctly in Helm chart 2021-04-07 23:21:41 +02:00
stakater-user
c3b8af34ac [skip-ci] Update artifacts 2021-03-22 13:47:07 +00:00
Ahmed Waleed Malik
7a65bcb35b Merge pull request #218 from stakater/fix-issue-207
Make argo rollouts optional
2021-03-22 18:32:15 +05:00
faizanahmad055
af6cd9e37c Add isArgoRollouts in values.yaml.tmpl
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-03-22 11:34:47 +01:00
faizanahmad055
344004d0b3 Make argo rollouts optional
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-03-22 11:30:22 +01:00
stakater-user
a5bc586f09 [skip-ci] Update artifacts 2021-03-17 04:30:18 +00:00
Ahmed Waleed Malik
81ca7ab601 Merge pull request #212 from corco/multi_arch_dockerfile
Dockerfile now use the platform supplied by buildx
2021-03-17 09:15:39 +05:00
Jonathan Drolet
69c9ccb2ea Merge remote-tracking branch 'stakater/master' into multi_arch_dockerfile 2021-03-16 09:05:32 -04:00
Ahmed Waleed Malik
0ec3effab8 Merge pull request #215 from stakater/use-commit-hash
Update pull_request.yaml
2021-03-16 16:26:17 +05:00
Ahmed Waleed Malik
dba42e91bc Update pull_request.yaml 2021-03-16 16:20:16 +05:00
Jonathan Drolet
68fd3bebe5 Dockerfile now use the platform supplied by buildx 2021-03-15 21:11:18 -04:00
Ahmed Waleed Malik
52b975ef0d Merge pull request #211 from stakater/update-pr-workflow
Update pull_request.yaml
2021-03-15 20:38:18 +05:00
Ahmed Waleed Malik
0679af76f4 Update pull_request.yaml 2021-03-15 20:23:53 +05:00
Ahmed Waleed Malik
309c10f632 Merge pull request #206 from stakater/update-modules
Update modules
2021-03-10 09:49:27 +05:00
Waleed Malik
07ddec9fd1 Clean up unused dependencies 2021-03-10 09:19:23 +05:00
Waleed Malik
69a80fd1d9 Update modules 2021-03-10 09:18:51 +05:00
stakater-user
04975de060 [skip-ci] Update artifacts 2021-03-09 19:20:58 +00:00
Ahmed Waleed Malik
459a808371 Merge pull request #205 from tete17/Support-Rollouts-on-Helm-deployment
Allow reloader to modify rollouts when installed through helm
2021-03-09 23:50:42 +05:00
tete17
ef8a335c93 Allow reloader to modify rollouts when installed through helm 2021-03-09 20:19:33 +01:00
stakater-user
93a52500d1 [skip-ci] Update artifacts 2021-03-09 18:23:56 +00:00
Ahmed Waleed Malik
ac2dac330e Merge pull request #202 from tete17/Add-support-for-Argo-Rollouts
Add support for argo rollouts
2021-03-09 22:55:44 +05:00
stakater-user
e9843c7c7d [skip-ci] Update artifacts 2021-03-05 05:55:14 +00:00
Ahmed Waleed Malik
1f154d0572 Merge pull request #204 from corco/master
Added multi-arch docker image for Github actions
2021-03-05 10:29:52 +05:00
Jonathan Drolet
7ccb17392e Added multi-arch docker image for Github actions 2021-03-04 10:52:44 -05:00
tete17
e8da3f48ec Modify documentation to reflect feature with rollout 2021-02-23 18:37:46 +01:00
tete17
614865a8d7 Add support for ArgoCD Rollout CRD 2021-02-23 18:24:39 +01:00
stakater-user
4f551ada6e [skip-ci] Update artifacts 2021-02-18 09:08:37 +00:00
Ahmed Waleed Malik
608a928967 Merge pull request #201 from wtayyeb/patch-1
fix deployment args indentation and add search, match custom_annotations
2021-02-18 13:57:36 +05:00
wtayyeb
5a14798341 simplify auto-search, search-match annotations 2021-02-18 10:59:46 +03:30
wtayyeb
e7516e82e3 fix deployment args, add custom_annotations.auto_search, search_match 2021-02-18 10:44:13 +03:30
stakater-user
dc3494c041 [skip-ci] Update artifacts 2021-02-08 04:48:48 +00:00
Ahmed Waleed Malik
79e3588389 Merge pull request #200 from stakater/fix-issue-199
Add custom annotation support in service account
2021-02-08 09:37:43 +05:00
faizanahmad055
45a833bbb2 Add custom annotation support in service account
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-02-06 14:29:47 +01:00
stakater-user
1f22ebe132 [skip-ci] Update artifacts 2021-01-26 13:00:36 +00:00
Ahmed Waleed Malik
1846b31936 Merge pull request #198 from stakater/version-bump-fix
Fixed version bump command in Makefile
2021-01-26 17:49:18 +05:00
talha0324
935a17b1c7 Fixed version bump command in Makefile 2021-01-26 17:31:54 +05:00
stakater-user
7b44a472ad [skip-ci] Update artifacts 2021-01-26 11:12:27 +00:00
Ahmed Waleed Malik
a46b56271c Merge pull request #197 from stakater/fix-pipeline
Fix version env var in Makefile
2021-01-26 16:00:48 +05:00
Waleed Malik
2f9dd7c422 Fix version env var in Makefile 2021-01-26 15:57:25 +05:00
Ahmed Waleed Malik
f373686b75 Merge pull request #195 from stakater/fix-chart-path
Fix chart path for helm publish step
2021-01-26 15:36:32 +05:00
Ahmed Waleed Malik
80557ce43e Fix chart path for helm publish step 2021-01-26 15:34:58 +05:00
Ahmed Waleed Malik
c4f6d93eb9 Merge pull request #194 from stakater/ahmedwaleedmalik-patch-1
Update push.yaml
2021-01-26 15:17:16 +05:00
Ahmed Waleed Malik
c75c787738 Update push.yaml 2021-01-26 15:16:12 +05:00
Ahmed Waleed Malik
ba18bbfd72 Merge pull request #193 from stakater/update-chart-publish
Update chart publish step
2021-01-26 15:01:24 +05:00
Waleed Malik
610b4e5716 Update chart publish step 2021-01-26 14:56:06 +05:00
Ahmed Waleed Malik
dc0715de61 Merge pull request #192 from stakater/use-pr-target-hook
[skip-ci] use pull_request_target hook for pipelines against PRs
2021-01-26 14:51:15 +05:00
Waleed Malik
4f6ff420e8 Fix chart publish step 2021-01-26 14:31:04 +05:00
Waleed Malik
966d5e61c0 [skip-ci] use pull_request_target hook for pipelines against PRs 2021-01-26 14:10:56 +05:00
Ahmed Waleed Malik
d017747792 Merge pull request #189 from stakater/workflow-implementation
Workflow implementation
2021-01-26 14:10:10 +05:00
Waleed Malik
70099fdc8f Fix helm lint step 2021-01-26 13:54:14 +05:00
Waleed Malik
aaddec1103 Skip failing test cases 2021-01-26 12:59:49 +05:00
Waleed Malik
b5fdcd577d Refactor controller test cases 2021-01-26 12:04:40 +05:00
Waleed Malik
8b9bf07631 Temporarily switch to pull_request hook for testing 2021-01-26 11:42:40 +05:00
Waleed Malik
674444850d Merge latest master 2021-01-26 11:35:44 +05:00
Waleed Malik
e74dcc3cbd Update workflows 2021-01-26 11:30:38 +05:00
Waleed Malik
dcae4c98ac Add updated Dockerfile 2021-01-26 11:30:22 +05:00
Waleed Malik
94a83c5974 Bump golang version to 1.15 2021-01-26 11:14:52 +05:00
Waleed Malik
592976bf09 Run go mod tidy 2021-01-26 11:11:30 +05:00
Waleed Malik
ed736c8e20 Remove .VERSION file 2021-01-26 11:11:21 +05:00
Ahmed Waleed Malik
84133742b1 Merge pull request #186 from coldfire84/pr-docker-multi-arch
Enable support for multi-arch container image build/ publish: linux/arm, linux/arm64 and linux/amd64.
2021-01-26 11:09:00 +05:00
stakater-user
04e19a733b Bump Version to v0.0.77 2021-01-21 08:32:24 +00:00
Ahmed Waleed Malik
c1ae5efb7b Merge pull request #190 from gracedo/gracedo/check_api_legacy
[helm chart] Check api version availability instead of using legacy value
2021-01-21 13:00:23 +05:00
Grace Do
f630336fed Check api version availability instead of using legacy value 2021-01-19 10:18:27 -08:00
talha0324
fde312edcc Update golang code lint errors 2021-01-19 15:54:30 +05:00
talha0324
57eb4f4eaa Updates to the workflow 2021-01-19 15:22:31 +05:00
talha0324
1490a1feaa Updates to workflow and few path updates 2021-01-18 17:43:07 +05:00
talha0324
58c622eb91 Added workflow files for Jenkins replacement 2021-01-18 17:33:02 +05:00
stakater-user
2fd8b190b1 Bump Version to v0.0.76 2021-01-11 04:45:48 +00:00
Ahmed Waleed Malik
81c840ea30 Merge pull request #187 from stakater/fix-issue-166
Remove redundant reload on resource creation
2021-01-11 09:36:26 +05:00
faizanahmad055
21dbeb9810 Remove redundant reload on resource creation
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-01-08 17:28:50 +01:00
Chris Bradford
fba004d655 Enable support for multi-arch container images: arm, arm64 and amd64.
Add Container Build documentation.
2020-12-19 12:38:32 +00:00
stakater-user
631781aa8a Bump Version to v0.0.75 2020-12-01 12:11:52 +00:00
Ali Kahoot
707dccf6b8 Merge pull request #184 from stakater/fix-helm-chart-template
Fix helm chart template
2020-12-01 17:02:49 +05:00
Waleed Malik
5edd29b8e9 Remove redundant fields from service in helm chart 2020-12-01 16:58:51 +05:00
Waleed Malik
27815ea3b3 Update values.service.ports to values.service.port 2020-12-01 16:48:10 +05:00
Waleed Malik
5fd275a05c Add waleed as reviewer and approver 2020-12-01 16:39:11 +05:00
Waleed Malik
b22694d3c2 Add servicemonitor in values template 2020-12-01 16:38:55 +05:00
Ahmed Waleed Malik
5c95c6898b Merge pull request #180 from dpetersen/service-monitor
Add optional ServiceMonitor object to Helm chart
2020-12-01 15:08:00 +05:00
Jose Bautista
46bc4b71db update readme 2020-11-28 17:28:37 +02:00
Don Petersen
cee81b4757 Add optional ServiceMonitor object to Helm chart
This adds the ability to create a ServiceMonitor instance to configure
Prometheus to monitor reloader for metrics. ServiceMonitor is a CRD that
comes with the prometheus-operator project.
2020-11-25 13:25:07 -08:00
stakater-user
1cec52637f Bump Version to v0.0.74 2020-10-28 17:08:26 +00:00
Ahmed Waleed Malik
1901a4eb49 Merge pull request #146 from mnach/add-metrics-service
add metrics endpoints to kubernetes specs
2020-10-28 21:57:15 +05:00
Mikhail Vladimirovich Nacharov
710396f66e add metrics endpoints to kubernetes specs 2020-10-28 01:13:49 +05:00
stakater-user
11bafa9f36 Bump Version to v0.0.73 2020-10-27 10:13:10 +00:00
Ahmed Waleed Malik
9a45318fc9 Merge pull request #175 from stakater/fix-issue-173
Fix issue 173
2020-10-26 12:50:12 +05:00
faizanahmad055
843f47600a Fix formatting of documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:28:37 +01:00
faizanahmad055
3d9dee27b5 Fix formatting of documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:27:58 +01:00
faizanahmad055
63fd3c2635 Add documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:24:41 +01:00
faizanahmad055
284ca59ca4 Add annotations, labels and documenation to support helm3
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:22:05 +01:00
stakater-user
2ce24abe40 Bump Version to v0.0.72 2020-10-20 07:36:45 +00:00
Usama Ahmad
6419444663 Merge pull request #172 from stakater/fix-chart
Fix helm chart template
2020-10-20 12:26:56 +05:00
Waleed Malik
1a6fd3e302 Fix helm chart template 2020-10-20 10:44:59 +05:00
Ahmed Waleed Malik
7ac90b8c88 Merge pull request #170 from stakater/fix-issue-169
Fix#169 - Update Rbac api versions
2020-10-20 09:33:08 +05:00
faizanahmad055
faf27c2d5d Add support for legacy rbac
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 16:00:54 +02:00
faizanahmad055
6a0dfd3ce0 Update Rbac api versions
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 10:32:32 +02:00
stakater-user
fdbc3067ce Bump Version to v0.0.71 2020-10-13 03:56:54 +00:00
Ahmed Waleed Malik
c4ead210ee Merge pull request #168 from booleanbetrayal/namespaces-to-ignore_helm_support
Add Helm chart support for --namespaces-to-ignore flag
2020-10-13 08:47:22 +05:00
Brent Dearth
0441f6d481 Add Helm chart support for --namespaces-to-ignore flag 2020-10-12 15:04:08 -06:00
stakater-user
09b9a073a0 Bump Version to v0.0.70 2020-10-05 10:28:03 +00:00
Ahmed Waleed Malik
d6d188f224 Merge pull request #162 from pchico83/master
Add okteto manifest to develop Reloader directly on Kubernetes
2020-10-05 15:18:27 +05:00
stakater-user
422c291b06 Bump Version to v0.0.69 2020-09-22 13:39:09 +00:00
Júlia Biró
ed6ea026a8 Trim spaces in annotation list (#165)
* strip whitespace

* only trim spaces
2020-09-22 15:29:29 +02:00
Pablo Chico de Guzman
da30b4744b Add okteto manifest to develop Reloader directly on Kubernetes 2020-09-04 11:26:59 +02:00
stakater-user
503e357349 Bump Version to v0.0.68 2020-09-01 05:42:04 +00:00
Josh Soref
61e9202781 Spelling (#161)
* spelling: create-or
2020-09-01 10:32:16 +05:00
stakater-user
8dbe7a85af Bump Version to v0.0.67 2020-08-08 18:46:03 +00:00
Ahmad Iqbal Ali
e86f616305 update slack links in readme (#156) 2020-08-08 20:36:09 +02:00
stakater-user
0c36cfd602 Bump Version to v0.0.66 2020-08-06 18:20:25 +00:00
Faizan Ahmad
f38f86a45c Merge pull request #154 from clive-jevons/respect-configmap-binarydata-for-hash
Include data from ConfigMap.BinaryData when calculating SHA
2020-08-06 20:09:59 +02:00
Faizan Ahmad
5033b8fcdc Merge pull request #155 from kostyrev/master
Add fullnameOverride to helm chart
2020-08-06 20:09:45 +02:00
Aleksandr Kostyrev
be4285742a Add fullnameOverride to helm chart 2020-08-06 16:54:50 +03:00
Clive Jevons
6a008999f5 Include data from ConfigMap.BinaryData when calculating SHA 2020-08-06 13:37:50 +02:00
stakater-user
93f4ea240f Bump Version to v0.0.65 2020-08-04 09:17:57 +00:00
stakater-user
c6fbae2f62 Bump Version to v0.0.64 2020-08-04 08:15:47 +00:00
Ahmed Waleed Malik
3fe0ebb48a Merge pull request #152 from liuming-dev/refactor--code-polish
Polishing code
2020-08-04 13:02:17 +05:00
Ahmed Waleed Malik
67b847bf41 Merge pull request #151 from liuming-dev/style--gofmt
style: gofmt -l -w -s .
2020-08-04 13:01:44 +05:00
Liu Ming
eaa3db48f5 Polish code
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-29 10:25:55 +08:00
Liu Ming
a505d2e3b1 style: gofmt -l -w -s .
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-28 21:45:56 +08:00
stakater-user
9ec5515a39 Bump Version to v0.0.63 2020-07-20 17:56:16 +00:00
Ahmed Waleed Malik
8db17acf67 Merge pull request #150 from stakater/fix-watch-global
Fix watch global
2020-07-20 22:46:29 +05:00
faizanahmad055
b43719cf34 Remove duplicate condition
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-07-20 19:33:38 +02:00
faizanahmad055
e8216069a5 Fix issue for watch global variable 2020-07-20 18:48:58 +02:00
stakater-user
732d35e45f Bump Version to v0.0.62 2020-07-17 09:17:35 +00:00
Ahmed Waleed Malik
dcedaa2cfe Merge pull request #147 from alexconlin/patch-1
Remove empty fields from kustomize deployment
2020-07-17 14:07:01 +05:00
Alex Conlin-Oakley
8d77121c3b only include env and args in container when needed 2020-07-15 17:42:08 +01:00
Alex Conlin
013cd92219 Merge branch 'master' into patch-1 2020-07-10 17:24:33 +01:00
stakater-user
39b5be37af Bump Version to v0.0.61 2020-07-10 15:54:41 +00:00
kahootali
86c2ed265d Add non-root security context 2020-07-10 20:40:48 +05:00
LucasBoisserie
87130f06bc Run as Non Root (#149) 2020-07-10 17:34:49 +02:00
Ali Kahoot
17f702f510 Merge pull request #148 from TBBle/patch-1
Typo fix "resatart"
2020-07-10 20:27:40 +05:00
Paul "TBBle" Hampson
16f3055e10 Typo fix "resatart" 2020-07-10 11:34:47 +10:00
Alex Conlin
4800af8e28 Remove empty fields from deployment manifest 2020-07-08 11:22:43 +01:00
Alex Conlin
db79c65334 Remove empty fields from kustomize deployment
Fixes #115
2020-07-07 22:52:53 +01:00
stakater-user
d2223f313f Bump Version to v0.0.60 2020-06-23 07:15:27 +00:00
Ahmed Waleed Malik
c9dabc3a14 Merge pull request #142 from vladlosev/feat-track-secrets-configmaps-by-annotation
Adds support for auto-reloading secrets and configmaps by annotation.
2020-06-23 12:05:22 +05:00
Vlad Losev
e61f9a6bdb Adds note on incompatibility with 'reloader.stakater.com/auto'. 2020-06-21 14:15:58 -07:00
Vlad Losev
6bcec06052 Fixes missing import. 2020-06-09 19:45:12 -07:00
Vlad Losev
0988e8947f Removes unused import. 2020-06-09 19:18:12 -07:00
Vlad Losev
ff27cc0f51 Simplifies test. 2020-06-09 19:17:19 -07:00
Vlad Losev
be7d454504 Fixes test using auto annotations. 2020-06-09 19:16:04 -07:00
Vlad Losev
3131116ed6 Re-adds sleep statements. 2020-06-09 19:16:04 -07:00
Vlad Losev
965cacf1ba Updates documentation. 2020-06-09 19:16:04 -07:00
Vlad Losev
e81b49d81b Renames search annotation. 2020-06-09 19:16:03 -07:00
Vlad Losev
17f8b81110 Simplifies annotations for searching secrets for reload. 2020-06-09 19:16:03 -07:00
Vlad Losev
5980c91560 Abstracts out configmap and deployment creation. 2020-06-09 19:16:01 -07:00
Vlad Losev
fda733ea5a Adds support for auto-reloading secrets and configmaps by annotation. 2020-06-09 19:14:14 -07:00
stakater-user
732cd5b53a Bump Version to v0.0.59 2020-06-09 16:50:55 +00:00
stakater-user
aae0c5c443 Merge pull request #141 from cucxabong/support-projected-volume
Support projected volume
2020-06-09 20:16:24 +05:00
Quan Hoang
d4223311de Support projected volume 2020-06-02 23:36:08 +07:00
stakater-user
29173c7364 Bump Version to v0.0.58 2020-03-30 15:12:34 +00:00
inductor
7767809a38 Refactor Dockerfile (#133)
* refactor dockerfile

* update go version on Dockerfile
2020-03-30 16:52:33 +02:00
stakater-user
f0d6a9e646 Bump Version to v0.0.57 2020-03-18 09:26:04 +00:00
Usama Ahmad
d1538dbeec Merge pull request #131 from stakater/fix-pipeline
Update pipeline library and pipeline tools version
2020-03-18 14:15:55 +05:00
Waleed Malik
8470962383 Update pipeline library and pipeline tools version 2020-03-18 14:02:05 +05:00
Ali Kahoot
139bc1ca38 Merge pull request #128 from katainaka0503/add-prometheus-endpoint
Implement prometheus endpoint
2020-03-17 20:43:03 +05:00
kahootali
6d9f89a452 add service manifest 2020-03-17 15:27:47 +05:00
Ali Kahoot
5ba914d6bb Fix documentation 2020-03-17 15:10:42 +05:00
katainaka0503
9a5094a4ed Add document of prometheus endpoint 2020-03-01 21:56:05 +09:00
katainaka0503
3fe7ad04e9 Implement prometheus endpoint 2020-03-01 21:56:02 +09:00
stakater-user
bf6a247f54 Bump Version to v0.0.56 2020-03-01 12:47:59 +00:00
stakater-user
8203cc3c11 Bump Version to v0.0.55 2020-03-01 12:37:09 +00:00
stakater-user
f287d84b6a Bump Version to v0.0.54 2020-03-01 12:26:27 +00:00
katainaka
205d36512c Remove redundant namePrefix from kustomization.yaml sample (#126) 2020-03-01 13:16:35 +01:00
stakater-user
0b39353c12 Bump Version to v0.0.53 2020-02-28 12:53:28 +00:00
Ali Kahoot
97a5616e60 Merge pull request #125 from stakater/update-library-version
update library version and toolsImage
2020-02-28 17:43:19 +05:00
usamaahmadkhan
b274ac0947 update library version and toolsImage 2020-02-28 17:28:26 +05:00
stakater-user
ed29d1d18c Bump Version to v0.0.52 2020-02-14 06:45:57 +00:00
aure-olli
2384d65953 feat: allow annotations in the pod template too (#123)
Several public charts only allow to edit the annotations of the pod template, not the deployment. Annotations will also be checked in the pod template if not present in the deployment.

fix: #122

Signed-off-by: Aurélien Lambert <aure@olli-ai.com>
2020-02-14 07:32:31 +01:00
stakater-user
7b19601423 Bump Version to v0.0.51 2020-01-13 13:33:00 +00:00
Usama Ahmad
76bf43cb13 Merge pull request #121 from elblivion/json-logging
JSON logging
2020-01-13 18:22:46 +05:00
kahootali
1b7bb3bead add support for logFormat argument in deployment 2020-01-13 17:10:08 +05:00
kahootali
c844f12f73 add logFormat default value 2020-01-13 17:09:53 +05:00
kahootali
5ac2164a1c add logFormat parameter in Readme 2020-01-13 17:09:25 +05:00
Anthony Stanton
c9b89c37c1 Update README 2020-01-03 13:44:01 +01:00
Anthony Stanton
55bc4c3e22 JSON logging 2020-01-02 22:10:41 +01:00
stakater-user
77c7d63296 Bump Version to v0.0.50 2019-12-31 05:49:32 +00:00
Irtiza Ali
2ae4753efb Merge pull request #119 from rjshrjndrn/patch-1
Include annotations in templating.
2019-12-31 10:39:13 +05:00
Rajesh Rajendran
68d0349793 Include annotations in templating.
for example, If I have istio enabled in a namespace, and I want to deploy reloader in that, It doesn't make sense to include envoy there. So there should be an option to add `sidecar.istio.io/inject: "false"` annotation.
2019-12-31 10:52:19 +05:30
stakater-user
ded923b12a Bump Version to v0.0.49 2019-10-22 12:21:42 +00:00
Waseem Hassan Shahid
0726999bf9 Merge pull request #109 from alexandrsemak/patch-1
fix value for helm template
2019-10-22 14:11:26 +02:00
Alexandr Semak
f89c321a50 fix value for helm template 2019-10-22 06:43:19 -04:00
stakater-user
225427cec1 Bump Version to v0.0.48 2019-10-21 07:12:07 +00:00
Waseem Hassan Shahid
be86e8417f Update .goreleaser.yml 2019-10-21 09:01:55 +02:00
stakater-user
bf961c0456 Bump Version to v0.0.47 2019-10-21 07:01:11 +00:00
Waseem Hassan Shahid
3248ca9578 Change name template of archives 2019-10-21 08:50:39 +02:00
stakater-user
d517626033 Bump Version to v0.0.46 2019-10-21 06:39:52 +00:00
Waseem Hassan Shahid
28c9696bdf Update Jenkinsfile 2019-10-21 08:29:14 +02:00
stakater-user
d0baf7a5cc Bump Version to v0.0.45 2019-10-18 11:59:28 +00:00
Waseem Hassan
492fae7b52 Merge pull request #108 from stakater/add-github-token
Add github token to goreleaser
2019-10-18 13:30:31 +02:00
waseem
4999bdbd96 Add github token to goreleaser 2019-10-18 13:29:01 +02:00
stakater-user
fd7d0f9f99 Bump Version to v0.0.44 2019-10-18 11:26:06 +00:00
Waseem Hassan
0359c3040a Merge pull request #107 from stakater/use-goreleaser
Use goreleaser for releasing binaries
2019-10-18 13:11:34 +02:00
waseem
d710c16774 Use stk compatible version file 2019-10-18 12:38:47 +02:00
waseem
e57db0dc56 Remove version file 2019-10-18 12:38:13 +02:00
waseem
e1a6a1ed87 Change library function to use 2019-10-18 12:35:44 +02:00
waseem
f8b3b21bbd Use goreleaser for releasing binaries 2019-10-18 12:34:26 +02:00
stakater-user
b6a333ea73 Bump Version to v0.0.43 2019-10-16 08:08:06 +00:00
Waseem Hassan
efa30662ae Merge pull request #106 from stakater/compatibility-1.16
Make reloader compatible with kubernetes 1.16
2019-10-16 09:57:50 +02:00
waseem
5fff0c9bb7 Add compatibility note 2019-10-16 08:05:03 +02:00
waseem
ec35f653b1 Fix labels 2019-10-16 07:25:02 +02:00
waseem
9229775f11 Make reloader compatible with kubernetes 1.16 2019-10-16 07:03:43 +02:00
stakater-user
e14b0c81b0 Bump Version to v0.0.42 2019-10-15 06:39:28 +00:00
Waseem Hassan
6aa016f6dd Merge pull request #104 from stakater/go-modules
Use Go modules
2019-10-15 08:29:12 +02:00
waseem
02cab487c4 Add missing global field in values template 2019-10-15 08:10:33 +02:00
waseem
4ecdc0acaa Update toolsImage 2019-10-15 08:07:44 +02:00
waseem
4b02478210 Use go mod to download dependencies 2019-10-15 08:03:09 +02:00
waseem
6ec1f9add8 Use go modules 2019-10-15 07:58:12 +02:00
Waseem Hassan
fe3499ee26 Merge pull request #102 from stakater/ignore-namespace-flag
Add support for ignoring namespaces
2019-10-15 07:49:13 +02:00
waseem
aee5d84f45 Add to update method 2019-10-14 14:20:17 +02:00
waseem
a10b2fa747 Add support for ignoring namespaces 2019-10-14 14:14:40 +02:00
Irtiza Ali
6d8a81fa26 Merge pull request #101 from marcostvz/reloader-sc-ips
Sync with upstream helm/charts/stable/reloader
2019-10-14 10:13:26 +05:00
Marcos Estevez
734b33ba55 Sync with upstream helm/charts/stable/reloader
Signed-off-by: Marcos Estevez <marcos.stvz@gmail.com>
2019-10-10 14:29:51 +02:00
Irtiza Ali
5265c14760 Merge pull request #100 from stakater/fix-vanilla-manifest-usage-guideline
fix-vanilla-manifest-usage-guideline
2019-10-09 13:01:13 +05:00
irti
5fb1a8b5ef [fix-vanilla-manifest-usage-guideline] update the vanilla manifest usage guidelines 2019-10-09 12:17:32 +05:00
stakater-user
29f4c66274 Bump Version to v0.0.41 2019-10-09 06:55:17 +00:00
Ali Kahoot
82b94d8b57 Merge pull request #98 from stakater/fix-direct-installation-issue
[fix-direct-installation-issue] update Jenkinsfile
2019-10-09 11:44:37 +05:00
irti
984794850b [fix-direct-installation-issue] remove the chartRepositoryURL arg from the goBuildAndRelease method 2019-10-09 11:27:32 +05:00
Ali Kahoot
de0d588406 Merge pull request #99 from stakater/remove-chartmuseum-from-pipeline
remove chartmuseum from pipeline
2019-10-07 12:35:08 +05:00
Usama Ahmad
5665a5c424 remove chartmuseum from pipeline 2019-10-07 12:17:43 +05:00
irti
4598bf0f7d [fix-direct-installation-issue] update Jenkinsfile 2019-10-01 20:14:16 +05:00
irti
c8ab70f80d [fix-direct-installation-issue] update Jenkinsfile 2019-10-01 15:37:35 +05:00
irti
2d720809d9 [fix-direct-installation-issue] change the pipeline library version 2019-09-23 12:15:39 +05:00
irti
2c2beb91c7 [fix-direct-installation-issue] update Jenkinsfile 2019-09-20 11:27:30 +05:00
Ali Kahoot
d3f2eb794f Merge pull request #93 from stakater/add-info-for-manifest-issue
add-info-for-manifest-issue
2019-08-28 19:08:54 +05:00
irti
02ddd80280 [add-info-for-manifest-issue] update the README.md by adding the info to replace the RELEASE-NAME with a proper value 2019-08-28 18:57:25 +05:00
62 changed files with 4383 additions and 783 deletions

.github/workflows/pull_request.yaml (new file, 136 lines)

@@ -0,0 +1,136 @@
name: Pull Request
on:
pull_request_target:
branches:
- master
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
jobs:
build:
runs-on: ubuntu-latest
name: Build
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
steps:
- name: Check out code
uses: actions/checkout@v2
with:
ref: ${{github.event.pull_request.head.sha}}
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
- name: Set up Go
id: go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install Dependencies
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Helm Lint
run: |
cd deployments/kubernetes/chart/reloader
helm lint
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
sudo install ./kubectl /usr/local/bin/ && rm kubectl
kubectl version --short --client
kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
- name: Install Kind
run: |
curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
sudo install ./kind /usr/local/bin && rm kind
kind version
kind version | grep -q ${KIND_VERSION}
- name: Create Kind Cluster
run: |
kind create cluster
kubectl cluster-info
- name: Test
run: make test
- name: Generate Tag
id: generate_tag
run: |
sha=${{ github.event.pull_request.head.sha }}
tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
echo "##[set-output name=GIT_TAG;]$(echo ${tag})"
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Registry
uses: docker/login-action@v1
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and Push Docker Image
uses: docker/build-push-action@v2
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Comment on PR
uses: mshick/add-pr-comment@v1
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
allow-repeats: false
- name: Notify Failure
if: failure()
uses: mshick/add-pr-comment@v1
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
allow-repeats: false
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always() # Pick up events even if the job fails or is canceled.
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
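
For reference, a minimal shell sketch of what the `Generate Tag` step in this workflow computes; the PR number and head SHA below are hypothetical placeholders:

```bash
# Hypothetical inputs standing in for github.event.pull_request.*
sha="0123456789abcdef"   # head SHA of the pull request
pr="123"                 # pull request number
tag="SNAPSHOT-PR-${pr}-${sha:0:8}"
echo "${tag}"            # -> SNAPSHOT-PR-123-01234567
```

The resulting snapshot tag is what the `Build and Push Docker Image` and `Comment on PR` steps reference via `steps.generate_tag.outputs.GIT_TAG`.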

.github/workflows/push.yaml (new file, 179 lines)

@@ -0,0 +1,179 @@
name: Push
on:
push:
branches:
- master
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
jobs:
build:
name: Build
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
- name: Set up Go
id: go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install Dependencies
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
sudo install ./kubectl /usr/local/bin/ && rm kubectl
kubectl version --short --client
kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
- name: Install Kind
run: |
curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
sudo install ./kind /usr/local/bin && rm kind
kind version
kind version | grep -q ${KIND_VERSION}
- name: Create Kind Cluster
run: |
kind create cluster
kubectl cluster-info
- name: Test
run: make test
- name: Generate Tag
id: generate_tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
DEFAULT_BUMP: patch
DRY_RUN: true
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Registry
uses: docker/login-action@v1
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Generate image repository path
run: |
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
##############################
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
##############################
# Generate tag for operator without "v"
- name: Generate Operator Tag
id: generate_operator_tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: false
DEFAULT_BUMP: patch
DRY_RUN: true
# Update chart tag to the latest semver tag
- name: Update Chart Version
env:
VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
run: make bump-chart
- name: Helm Template
run: |
helm template stakater deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
# Publish helm chart
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master
with:
branch: master
repository: stakater-charts
target_dir: docs
token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
charts_dir: deployments/kubernetes/chart/
charts_url: ${{ env.HELM_REGISTRY_URL }}
owner: stakater
linting: on
commit_username: stakater-user
commit_email: stakater@gmail.com
# Commit back changes
- name: Commit files
run: |
git config --local user.email "stakater@gmail.com"
git config --local user.name "stakater-user"
git status
git add .
git commit -m "[skip-ci] Update artifacts" -a
- name: Push changes
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
branch: ${{ github.ref }}
- name: Push Latest Tag
uses: anothrNick/github-tag-action@1.26.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
DEFAULT_BUMP: patch
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always() # Pick up events even if the job fails or is canceled.
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

.github/workflows/release.yaml (new file, 44 lines)

@@ -0,0 +1,44 @@
name: Release Go project
on:
push:
tags:
- "v*"
env:
GOLANG_VERSION: 1.15.2
jobs:
build:
name: GoReleaser build
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v2
with:
fetch-depth: 0 # See: https://goreleaser.com/ci/actions/
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ${{ env.GOLANG_VERSION }}
id: go
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@master
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always()
with:
status: ${{ job.status }}
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

.gitignore (3 lines changed)

@@ -7,4 +7,5 @@ out/
_gopath/
.DS_Store
.vscode
vendor
vendor
dist

.goreleaser.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
builds:
- env:
- CGO_ENABLED=0
goos:
- windows
- darwin
- linux
goarch:
- 386
- amd64
- arm
- arm64
archives:
- name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
snapshot:
name_template: "{{ .Tag }}-next"
checksum:
name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
env_files:
github_token: /home/jenkins/.apitoken/hub

.stignore (new file, 3 lines)

@@ -0,0 +1,3 @@
.git
Reloader
__debug_bin


@@ -1 +0,0 @@
v0.0.40

34
Dockerfile Normal file
View File

@@ -0,0 +1,34 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM} golang:1.15.2 as builder
ARG TARGETOS
ARG TARGETARCH
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/
# Build
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/manager"]
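
This Dockerfile relies on Buildx to supply `BUILDPLATFORM`, `TARGETOS` and `TARGETARCH`. A minimal sketch of building it for the same platforms the workflows target, assuming Buildx and QEMU are already set up and using a hypothetical image name:

```bash
# Hypothetical tag; the CI workflows above compute and pass the real one.
docker buildx build \
  --platform linux/amd64,linux/arm,linux/arm64 \
  -t example/reloader:dev \
  --push .
```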

Jenkinsfile (deleted, 10 lines)

@@ -1,10 +0,0 @@
#!/usr/bin/groovy
@Library('github.com/stakater/fabric8-pipeline-library@v2.10.8')
def dummy
goBuildAndRelease {
chartRepositoryURL = 'https://chartmuseum.release.stakater.com'
publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
}


@@ -1,35 +1,64 @@
# note: call scripts from /scripts
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
BUILDER ?= reloader-builder
OS ?= linux
ARCH ?= ???
ALL_ARCH ?= arm64 arm amd64
BUILDER ?= reloader-builder-${ARCH}
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
# Default value "dev"
DOCKER_TAG ?= 1.0.0
REPOSITORY = ${DOCKER_IMAGE}:${DOCKER_TAG}
TAG ?= v0.0.75.0
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
VERSION=$(shell cat .version)
VERSION ?= 0.0.1
BUILD=
GOCMD = go
GLIDECMD = glide
GOFLAGS ?= $(GOFLAGS:)
LDFLAGS =
default: build test
install:
"$(GLIDECMD)" install --strip-vendor
"$(GOCMD)" mod download
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
builder-image:
@docker build --network host -t "${BUILDER}" -f build/package/Dockerfile.build .
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
reloader-${ARCH}.tar:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
binary-image: builder-image
@docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -
cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
push:
docker push ${REPOSITORY_ARCH}
release: binary-image push manifest
release-all:
-rm -rf ~/.docker/manifests/*
# Make arch-specific release
@for arch in $(ALL_ARCH) ; do \
echo Make release: $$arch ; \
make release ARCH=$$arch ; \
done
set -e
docker manifest push --purge $(REPOSITORY_GENERIC)
manifest:
set -e
docker manifest create -a $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
test:
"$(GOCMD)" test -timeout 1800s -v ./...
@@ -38,15 +67,30 @@ stop:
@docker stop "${BINARY}"
clean-images: stop
@docker rmi "${BUILDER}" "${BINARY}"
-docker rmi "${BINARY}"
@for arch in $(ALL_ARCH) ; do \
echo Clean image: $$arch ; \
make clean-image ARCH=$$arch ; \
done
-docker rmi "${REPOSITORY_GENERIC}"
clean-image:
-docker rmi "${BUILDER}"
-docker rmi "${REPOSITORY_ARCH}"
-rm -rf ~/.docker/manifests/*
clean:
"$(GOCMD)" clean -i
push: ## push the latest Docker image to DockerHub
docker push $(REPOSITORY)
-rm -rf reloader-*.tar
apply:
kubectl apply -f deployments/manifests/ -n temp-reloader
deploy: binary-image push apply
# Bump Chart
bump-chart:
sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml

README.md (140 lines changed)

@@ -13,11 +13,15 @@
## Problem
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset` and `Statefulset`
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`
## Solution
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` and `Statefulsets`.
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts`.
## Compatibility
Reloader is compatible with kubernetes >= 1.9
## How to use Reloader
@@ -29,13 +33,47 @@ metadata:
annotations:
reloader.stakater.com/auto: "true"
spec:
template:
metadata:
template: metadata:
```
This will discover deployments/daemonsets/statefulset automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
You can restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:
```yaml
kind: Deployment
metadata:
annotations:
reloader.stakater.com/search: "true"
spec:
template:
```
and Reloader will trigger the rolling upgrade upon modification of any
`ConfigMap` or `Secret` annotated like this:
```yaml
kind: ConfigMap
metadata:
annotations:
reloader.stakater.com/match: "true"
data:
key: value
```
provided the secret/configmap is being used in an environment variable, or a
volume mount.
Please note that `reloader.stakater.com/search` and
`reloader.stakater.com/auto` do not work together. If you have the
`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
will always restart upon a change in configmaps or secrets it uses, regardless
of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
### Configmap
@@ -50,8 +88,7 @@ metadata:
annotations:
configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
template:
metadata:
template: metadata:
```
Use comma separated list to define multiple configmaps.
@@ -62,15 +99,14 @@ metadata:
annotations:
configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
template:
metadata:
template: metadata:
```
### Secret
To perform rolling upgrade when change happens only on specific secrets use below annotation.
For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
```yaml
kind: Deployment
@@ -78,8 +114,7 @@ metadata:
annotations:
secret.reloader.stakater.com/reload: "foo-secret"
spec:
template:
metadata:
template: metadata:
```
Use comma separated list to define multiple secrets.
@@ -90,17 +125,23 @@ metadata:
annotations:
secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
template:
metadata:
template: metadata:
```
### NOTES
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change is up to you how you configure the rollout strategy.
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the search annotation with the `--auto-search-annotation` flag
and the match annotation with the `--search-match-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
## Deploying to Kubernetes
@@ -108,30 +149,31 @@ You can deploy Reloader by following methods:
### Vanilla Manifests
You can apply vanilla manifests by running the following command
You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided in manifest with a proper value and apply it by running the command given below:
```bash
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```
By default Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
By default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :
| Args | Description |
|---|---|
| Args | Description |
| -------------------------------- | -------------------- |
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
| --resources-to-ignore=secrets | To ignore secrets |
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
### Vanilla kustomize
You can also apply the vanilla manifests by running the following command
```bash
kubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes
```
Similarly to vanilla manifests get deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
### Kustomize
@@ -142,8 +184,6 @@ You can write your own `kustomization.yaml` using ours as a 'base' and write pat
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: reloader-
bases:
- https://github.com/stakater/Reloader/deployments/kubernetes
@@ -152,47 +192,58 @@ namespace: reloader
### Helm Charts
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3
```bash
```bash
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update
helm install stakater/reloader
helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
```
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
```
Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:
| Parameter | Description | Type |
|---|---|---|
| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |
| Parameter | Description | Type |
| ---------------- | -------------------------------------------------------------- | ------- |
| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid value are either `true` or `false` | boolean |
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
| Parameter | Description | Type |
| ---------------- | ---------------------------------------------------------------------------- | ------- |
| isOpenshift | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean |
| isArgoRollouts | Enable Argo Rollouts. Valid value are either `true` or `false` | boolean |
## Help
### Documentation
You can find more documentation [here](docs/)
You can find more documentation [here](docs)
### Have a question?
File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).
### Talk to us on Slack
Join and talk to us on Slack for discussing Reloader
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://stakater-slack.herokuapp.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater.slack.com/messages/CC5S05S12)
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://slack.stakater.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater-community.slack.com/messages/CC5S05S12)
## Contributing
@@ -202,13 +253,18 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
### Developing
1. Deploy Reloader.
2. Run `okteto up` to activate your development container.
3. `make build`.
4. `./Reloader`
PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
1. **Fork** the repo on GitHub
2. **Clone** the project to your own machine
3. **Commit** changes to your own branch
4. **Push** your work back up to your fork
5. Submit a **Pull request** so that we can review your changes
1. **Fork** the repo on GitHub
2. **Clone** the project to your own machine
3. **Commit** changes to your own branch
4. **Push** your work back up to your fork
5. Submit a **Pull request** so that we can review your changes
NOTE: Be sure to merge the latest from "upstream" before making a pull request!
@@ -227,8 +283,8 @@ Apache2 © [Stakater](http://stakater.com)
See [our other projects][community]
or contact us for professional services and queries at <hello@stakater.com>
[website]: http://stakater.com/
[community]: https://github.com/stakater/
## Acknowledgements

View File

@@ -1,18 +1,23 @@
FROM stakater/go-glide:1.12.6
MAINTAINER "Stakater Team"
FROM golang:1.15.2-alpine
LABEL maintainer "Stakater Team"
RUN apk update
ARG GOARCH=amd64
RUN apk -v --update \
add git build-base && \
rm -rf /var/cache/apk/* && \
mkdir -p "$GOPATH/src/github.com/stakater/Reloader"
--no-cache \
add git build-base
ADD . "$GOPATH/src/github.com/stakater/Reloader"
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
glide install --strip-vendor && \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /

View File

@@ -1,8 +1,14 @@
FROM alpine:3.9
MAINTAINER "Stakater Team"
FROM alpine:3.11
LABEL maintainer "Stakater Team"
RUN apk add --update ca-certificates
RUN apk add --update --no-cache ca-certificates
COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/bin/Reloader"]

View File

@@ -3,27 +3,29 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.40
appVersion: v0.0.40
version: v0.0.98
appVersion: v0.0.98
keywords:
- Reloader
- kubernetes
home: https://github.com/stakater/Reloader
sources:
- https://github.com/stakater/IngressMonitorController
- https://github.com/stakater/Reloader
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater
email: hello@stakater.com
- name: rasheedamir
email: rasheed@aurorasolutions.io
- name: waseem-h
email: waseemhassan@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
- name: kahootali
email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
email: ahmad@aurorasolutions.io
- name: ahsan-storm
email: ahsanmuhammad1@outlook.com
- name: Stakater
email: hello@stakater.com
- name: rasheedamir
email: rasheed@aurorasolutions.io
- name: waseem-h
email: waseemhassan@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
- name: kahootali
email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
email: ahmad@aurorasolutions.io
- name: ahsan-storm
email: ahsanmuhammad1@outlook.com
- name: ahmedwaleedmalik
email: waleed@stakater.com

View File

@@ -5,6 +5,7 @@ approvers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik
reviewers:
- faizanahmad055
- kahootali
@@ -12,3 +13,4 @@ reviewers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik

View File

@@ -12,15 +12,20 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "reloader-fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}
{{/*
@@ -33,3 +38,11 @@ Create the name of the service account to use
{{ default "default" .Values.reloader.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the annotations to support helm3
*/}}
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}

View File

@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRole
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
@@ -26,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -37,6 +43,18 @@ rules:
- get
- update
- patch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""
resources:
- rollouts
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "apps"

View File

@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}

View File

@@ -1,8 +1,9 @@
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.reloader.deployment.annotations }}
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
{{- end }}
labels:
@@ -26,6 +27,10 @@ spec:
{{- end }}
template:
metadata:
{{- if .Values.reloader.deployment.pod.annotations }}
annotations:
{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 8 }}
{{- if .Values.reloader.deployment.labels }}
@@ -46,9 +51,16 @@ spec:
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
containers:
- env:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
- name: {{ $name | quote }}
@@ -79,42 +91,81 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- end }}
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
{{- end }}
{{- if .Values.reloader.ignoreSecrets }}
- "--resources-to-ignore=secrets"
{{- end }}
{{- if eq .Values.reloader.ignoreConfigMaps true }}
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
- "{{ .Values.reloader.custom_annotations.configmap }}"
- "--configmap-annotation"
- "{{ .Values.reloader.custom_annotations.configmap }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.secret }}
- "--secret-annotation"
- "{{ .Values.reloader.custom_annotations.secret }}"
- "--secret-annotation"
- "{{ .Values.reloader.custom_annotations.secret }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.auto }}
- "--auto-annotation"
- "{{ .Values.reloader.custom_annotations.auto }}"
- "--auto-annotation"
- "{{ .Values.reloader.custom_annotations.auto }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.search }}
- "--auto-search-annotation"
- "{{ .Values.reloader.custom_annotations.search }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.match }}
- "--search-match-annotation"
- "{{ .Values.reloader.custom_annotations.match }}"
{{- end }}
{{- end }}
{{- if eq .Values.reloader.isArgoRollouts true }}
- "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}
{{- end }}
{{- if .Values.reloader.deployment.securityContext }}
securityContext: {{ toYaml .Values.reloader.deployment.securityContext | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "reloader-serviceAccountName" . }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumes:

View File

@@ -0,0 +1,31 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
namespace: {{ .Values.reloader.podMonitor.namespace }}
{{- end }}
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -1,7 +1,13 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: Role
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
@@ -26,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -37,6 +43,18 @@ rules:
- get
- update
- patch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""
resources:
- rollouts
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "apps"

View File

@@ -1,8 +1,14 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: RoleBinding
metadata:
labels:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}

View File

@@ -0,0 +1,29 @@
{{- if .Values.reloader.service }}
apiVersion: v1
kind: Service
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.service.labels }}
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
ports:
- port: {{ .Values.reloader.service.port }}
name: http
protocol: TCP
targetPort: http
{{- end }}

View File

@@ -1,7 +1,15 @@
{{- if .Values.reloader.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
{{- end }}
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.annotations }}
{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.labels }}

View File

@@ -0,0 +1,31 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceMonitor.labels }}
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.serviceMonitor.namespace }}
namespace: {{ .Values.reloader.serviceMonitor.namespace }}
{{- end }}
spec:
endpoints:
- targetPort: http
path: "/metrics"
{{- if .Values.reloader.serviceMonitor.interval }}
interval: {{ .Values.reloader.serviceMonitor.interval }}
{{- end }}
{{- if .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -1,15 +1,25 @@
# Generated from deployments/kubernetes/templates/chart/values.yaml.tmpl
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
kubernetes:
host: https://kubernetes.default
reloader:
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -26,6 +36,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -38,10 +52,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.40
version: v0.0.98
image:
name: stakater/reloader
tag: "v0.0.40"
tag: v0.0.98
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -52,6 +66,18 @@ reloader:
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# Liveness and readiness probe timeout values.
livenessProbe: {}
# timeoutSeconds: 5
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
readinessProbe: {}
# timeoutSeconds: 15
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
# Specify resource requests/limits for the deployment.
# Example:
# resources:
@@ -62,6 +88,14 @@ reloader:
# cpu: "10m"
# memory: "128Mi"
resources: {}
pod:
annotations: {}
priorityClassName: ""
service: {}
# labels: {}
# annotations: {}
# port: 9090
rbac:
enabled: true
@@ -71,12 +105,37 @@ reloader:
# Specifies whether a ServiceAccount should be created
create: true
labels: {}
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
# Enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
podMonitor:
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

View File

@@ -4,12 +4,16 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
name: RELEASE-NAME-reloader-role
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
namespace: default
rules:
- apiGroups:

View File

@@ -4,19 +4,23 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
name: RELEASE-NAME-reloader-role-binding
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: RELEASE-NAME-reloader-role
name: reloader-reloader-role
subjects:
- kind: ServiceAccount
name: RELEASE-NAME-reloader
name: reloader-reloader
namespace: default

View File

@@ -3,40 +3,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.40
version: v0.0.77
name: RELEASE-NAME-reloader
name: reloader-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: RELEASE-NAME-reloader
release: "RELEASE-NAME"
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.40
version: v0.0.77
spec:
containers:
- env:
image: "stakater/reloader:v0.0.40"
- image: "stakater/reloader:v0.0.77"
imagePullPolicy: IfNotPresent
name: RELEASE-NAME-reloader
args:
serviceAccountName: RELEASE-NAME-reloader
name: reloader-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader

View File

@@ -0,0 +1,3 @@
---
# Source: reloader/templates/podmonitor.yaml

View File

@@ -0,0 +1,4 @@
---
# Source: reloader/templates/service.yaml

View File

@@ -4,10 +4,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
name: RELEASE-NAME-reloader
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader

View File

@@ -0,0 +1,4 @@
---
# Source: reloader/templates/servicemonitor.yaml

View File

@@ -1,61 +1,34 @@
---
# Source: reloader/templates/role.yaml
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "stakater"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.40
name: RELEASE-NAME-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: RELEASE-NAME-reloader
release: "RELEASE-NAME"
template:
metadata:
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.40
spec:
containers:
- env:
image: "stakater/reloader:v0.0.40"
imagePullPolicy: IfNotPresent
name: RELEASE-NAME-reloader
args:
serviceAccountName: RELEASE-NAME-reloader
app: stakater-reloader
chart: "reloader-v0.0.98"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "stakater"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: RELEASE-NAME-reloader-role
app: stakater-reloader
chart: "reloader-v0.0.98"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader-role
namespace: default
rules:
- apiGroups:
@@ -88,43 +61,93 @@ rules:
- get
- update
- patch
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "stakater"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: RELEASE-NAME-reloader-role-binding
app: stakater-reloader
chart: "reloader-v0.0.98"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: RELEASE-NAME-reloader-role
name: stakater-reloader-role
subjects:
- kind: ServiceAccount
name: RELEASE-NAME-reloader
name: stakater-reloader
namespace: default
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "stakater"
labels:
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: RELEASE-NAME-reloader
app: stakater-reloader
chart: "reloader-v0.0.98"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.98
name: stakater-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: stakater-reloader
release: "stakater"
template:
metadata:
labels:
app: stakater-reloader
chart: "reloader-v0.0.98"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.98
spec:
containers:
- image: "stakater/reloader:v0.0.98"
imagePullPolicy: IfNotPresent
name: stakater-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: stakater-reloader

View File

@@ -1,15 +1,25 @@
# Generated from deployments/kubernetes/templates/chart/values.yaml.tmpl
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
kubernetes:
host: https://kubernetes.default
reloader:
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -26,6 +36,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -62,6 +76,13 @@ reloader:
# cpu: "10m"
# memory: "128Mi"
resources: {}
pod:
annotations: {}
service: {}
# labels: {}
# annotations: {}
# port: 9090
rbac:
enabled: true
@@ -71,6 +92,7 @@ reloader:
# Specifies whether a ServiceAccount should be created
create: true
labels: {}
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
@@ -80,3 +102,27 @@ reloader:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
podMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

41
docs/Container Build.md Normal file
View File

@@ -0,0 +1,41 @@
# Container Build
> **WARNING:** As a user of Reloader you do not need to build containers; they are freely available here: https://hub.docker.com/r/stakater/reloader/
The multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader
Images are tested on linux/arm, linux/arm64 and linux/amd64.
# Install Pre-Reqs
The build environment requires the following packages (tested on Ubuntu 20.04):
* golang
* make
* qemu (for arm, arm64 etc. emulation)
* binfmt-support
* Docker engine
## Docker
Follow the instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository
Once installed, enable the experimental CLI:
```
export DOCKER_CLI_EXPERIMENTAL=enabled
```
Log in to enable publishing of packages:
```
sudo docker login
```
## Remaining Pre-Reqs
The remaining pre-reqs can be installed via:
```
sudo apt install golang make qemu-user-static binfmt-support -y
```
# Publish Multi-Architecture Image
To build and publish multi-arch Docker images, clone the repository and execute the following from the repository root:
```
sudo make release-all
```
# Additional Links/ Info
* https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408

62
docs/Helm2-to-Helm3.md Normal file
View File

@@ -0,0 +1,62 @@
# Helm2 to Helm3 Migration
Follow the instructions below to migrate Reloader from Helm2 to Helm3
## Instructions:
There are three steps involved in migrating Reloader from Helm2 to Helm3.
### Step 1:
Install the helm-2to3 plugin
```bash
helm3 plugin install https://github.com/helm/helm-2to3
helm3 2to3 convert <release-name>
helm3 2to3 cleanup --release-cleanup --skip-confirmation
```
### Step 2:
Add the following Helm3 label and annotations to the Reloader resources.
Label:
```yaml
app.kubernetes.io/managed-by=Helm
```
Annotations:
```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
```
For example, to label and annotate the ClusterRoleBinding and ClusterRole:
```bash
KIND=ClusterRoleBinding
NAME=reloader-reloader-role-binding
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
KIND=ClusterRole
NAME=reloader-reloader-role
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```
### Step 3:
Upgrade to the desired version:
```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts
helm3 repo update
helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```

View File

@@ -37,7 +37,7 @@ metadata:
```
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>
The above mentioned annotations also work for `Daemonsets` and `Statefulsets`
The above mentioned annotations also work for `Daemonsets`, `Statefulsets` and `Rollouts`
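For instance, the default secret annotation could be attached to a DaemonSet like this (a sketch; `my-daemonset` and `my-secret` are placeholder names):
```bash
# Reloader will restart my-daemonset's pods whenever the secret my-secret changes
kubectl annotate daemonset my-daemonset secret.reloader.stakater.com/reload="my-secret"
```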
## How Rolling upgrade works?

View File

@@ -8,5 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |

View File

@@ -8,4 +8,4 @@ Below are the steps to use reloader with Sealed Secrets.
8. Install Reloader.
9. Once everything is setup, update the original secret at client and encrypt it with kubeseal to see reloader working.
10. Apply the updated sealed secret.
11. Reloader will resatart the pod to use that updated secret.
11. Reloader will restart the pod to use that updated secret.

View File

@@ -1,6 +1,6 @@
# Verify Reloader's Working
Reloader's working can be verified in two ways.
Reloader's working can be verified in three ways.
## Verify from logs
@@ -49,3 +49,13 @@ After a change in `secret` or `configmap`. Run the below mentioned command and v
```bash
kubectl get pods <pod name> -n <namespace name>
```
## Verify from metrics
Some metrics are exported to the Prometheus endpoint `/metrics` on port `9090`.
When Reloader fails to reload, the `reloader_reload_executed_total{success="false"}` metric is incremented; when it reloads successfully, `reloader_reload_executed_total{success="true"}` is incremented. You will be able to see these metrics, among others, at the `/metrics` endpoint.
```
reloader_reload_executed_total{success="false"} 15
reloader_reload_executed_total{success="true"} 12
```
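One quick way to inspect these metrics locally (a sketch; it assumes the deployment is named `reloader-reloader` and runs in the `default` namespace):
```bash
# Forward the metrics port and look at the reload counters
kubectl port-forward deployment/reloader-reloader 9090:9090 -n default &
curl -s http://localhost:9090/metrics | grep reloader_reload_executed_total
```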

View File

@@ -2,6 +2,7 @@
These are the key features of Reloader:
1. Restart pod in a depoloyment on change in linked/related configmap's or secret's
1. Restart pods in a deployment on changes in linked/related configmaps or secrets
2. Restart pods in a daemonset on changes in linked/related configmaps or secrets
3. Restart pods in a statefulset on changes in linked/related configmaps or secrets
4. Restart pods in a rollout on changes in linked/related configmaps or secrets

277
glide.lock generated
View File

@@ -1,277 +0,0 @@
hash: 0a37eeebda95f7ac050377c5b8ca8a6f7ab051ef66ba1752471090157e6a6ea2
updated: 2019-07-03T21:04:13.576837+02:00
imports:
- name: github.com/davecgh/go-spew
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
subpackages:
- spew
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
subpackages:
- proto
- sortkeys
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/golang/protobuf
version: b4deda0973fb4c70b50d226b1af49f3da59f5265
subpackages:
- proto
- ptypes
- ptypes/any
- ptypes/duration
- ptypes/timestamp
- name: github.com/google/btree
version: 7d79101e329e5a3adf994758c578dab82b90c017
- name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/googleapis/gnostic
version: 0c5108395e2debce0d731cf0287ddf7242066aba
subpackages:
- OpenAPIv2
- compiler
- extensions
- name: github.com/gregjones/httpcache
version: 787624de3eb7bd915c329cba748687a3b22666a6
subpackages:
- diskcache
- name: github.com/hashicorp/golang-lru
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
subpackages:
- simplelru
- name: github.com/imdario/mergo
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/json-iterator/go
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
- name: github.com/modern-go/concurrent
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
- name: github.com/modern-go/reflect2
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
- name: github.com/openshift/api
version: d5b34b957e91dbf64013a866951c3ed5770db0b5
subpackages:
- apps/v1
- name: github.com/openshift/client-go
version: 431ec9a26e5021f35fa41ee9a89842db9bfdb370
subpackages:
- apps/clientset/versioned
- apps/clientset/versioned/scheme
- apps/clientset/versioned/typed/apps/v1
- name: github.com/openshift/library-go
version: 0b8367a4679859036c27a30dbe010d76409e7075
- name: github.com/peterbourgon/diskv
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
- name: github.com/sirupsen/logrus
version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc
- name: github.com/spf13/cobra
version: f62e98d28ab7ad31d707ba837a966378465c7b57
- name: github.com/spf13/pflag
version: 583c0c0531f06d5278b7d917446061adc344b5cd
- name: golang.org/x/crypto
version: 81e90905daefcd6fd217b62423c0908922eadb30
subpackages:
- ssh/terminal
- name: golang.org/x/net
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
subpackages:
- context
- http2
- http2/hpack
- idna
- lex/httplex
- name: golang.org/x/sys
version: 95c6576299259db960f6c5b9b69ea52422860fce
subpackages:
- unix
- windows
- name: golang.org/x/text
version: b19bf474d317b857955b12035d2c5acb57ce8b01
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: golang.org/x/time
version: f51c12702a4d776e4c1fa9b0fabab841babae631
subpackages:
- rate
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/yaml.v2
version: 670d4cfef0544295bc27a114dbac37980d83185a
- name: k8s.io/api
version: 2d6f90ab1293a1fb871cf149423ebb72aa7423aa
subpackages:
- admissionregistration/v1alpha1
- admissionregistration/v1beta1
- apps/v1
- apps/v1beta1
- apps/v1beta2
- authentication/v1
- authentication/v1beta1
- authorization/v1
- authorization/v1beta1
- autoscaling/v1
- autoscaling/v2beta1
- batch/v1
- batch/v1beta1
- batch/v2alpha1
- certificates/v1beta1
- core/v1
- events/v1beta1
- extensions/v1beta1
- networking/v1
- policy/v1beta1
- rbac/v1
- rbac/v1alpha1
- rbac/v1beta1
- scheduling/v1alpha1
- scheduling/v1beta1
- settings/v1alpha1
- storage/v1
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apimachinery
version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae
subpackages:
- pkg/api/errors
- pkg/api/meta
- pkg/api/resource
- pkg/apis/meta/internalversion
- pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured
- pkg/apis/meta/v1beta1
- pkg/conversion
- pkg/conversion/queryparams
- pkg/fields
- pkg/labels
- pkg/runtime
- pkg/runtime/schema
- pkg/runtime/serializer
- pkg/runtime/serializer/json
- pkg/runtime/serializer/protobuf
- pkg/runtime/serializer/recognizer
- pkg/runtime/serializer/streaming
- pkg/runtime/serializer/versioning
- pkg/selection
- pkg/types
- pkg/util/cache
- pkg/util/clock
- pkg/util/diff
- pkg/util/errors
- pkg/util/framer
- pkg/util/intstr
- pkg/util/json
- pkg/util/mergepatch
- pkg/util/net
- pkg/util/runtime
- pkg/util/sets
- pkg/util/strategicpatch
- pkg/util/validation
- pkg/util/validation/field
- pkg/util/wait
- pkg/util/yaml
- pkg/version
- pkg/watch
- third_party/forked/golang/json
- third_party/forked/golang/reflect
- name: k8s.io/client-go
version: 59698c7d9724b0f95f9dc9e7f7dfdcc3dfeceb82
subpackages:
- discovery
- discovery/fake
- kubernetes
- kubernetes/fake
- kubernetes/scheme
- kubernetes/typed/admissionregistration/v1alpha1
- kubernetes/typed/admissionregistration/v1alpha1/fake
- kubernetes/typed/admissionregistration/v1beta1
- kubernetes/typed/admissionregistration/v1beta1/fake
- kubernetes/typed/apps/v1
- kubernetes/typed/apps/v1/fake
- kubernetes/typed/apps/v1beta1
- kubernetes/typed/apps/v1beta1/fake
- kubernetes/typed/apps/v1beta2
- kubernetes/typed/apps/v1beta2/fake
- kubernetes/typed/authentication/v1
- kubernetes/typed/authentication/v1/fake
- kubernetes/typed/authentication/v1beta1
- kubernetes/typed/authentication/v1beta1/fake
- kubernetes/typed/authorization/v1
- kubernetes/typed/authorization/v1/fake
- kubernetes/typed/authorization/v1beta1
- kubernetes/typed/authorization/v1beta1/fake
- kubernetes/typed/autoscaling/v1
- kubernetes/typed/autoscaling/v1/fake
- kubernetes/typed/autoscaling/v2beta1
- kubernetes/typed/autoscaling/v2beta1/fake
- kubernetes/typed/batch/v1
- kubernetes/typed/batch/v1/fake
- kubernetes/typed/batch/v1beta1
- kubernetes/typed/batch/v1beta1/fake
- kubernetes/typed/batch/v2alpha1
- kubernetes/typed/batch/v2alpha1/fake
- kubernetes/typed/certificates/v1beta1
- kubernetes/typed/certificates/v1beta1/fake
- kubernetes/typed/core/v1
- kubernetes/typed/core/v1/fake
- kubernetes/typed/events/v1beta1
- kubernetes/typed/events/v1beta1/fake
- kubernetes/typed/extensions/v1beta1
- kubernetes/typed/extensions/v1beta1/fake
- kubernetes/typed/networking/v1
- kubernetes/typed/networking/v1/fake
- kubernetes/typed/policy/v1beta1
- kubernetes/typed/policy/v1beta1/fake
- kubernetes/typed/rbac/v1
- kubernetes/typed/rbac/v1/fake
- kubernetes/typed/rbac/v1alpha1
- kubernetes/typed/rbac/v1alpha1/fake
- kubernetes/typed/rbac/v1beta1
- kubernetes/typed/rbac/v1beta1/fake
- kubernetes/typed/scheduling/v1alpha1
- kubernetes/typed/scheduling/v1alpha1/fake
- kubernetes/typed/scheduling/v1beta1
- kubernetes/typed/scheduling/v1beta1/fake
- kubernetes/typed/settings/v1alpha1
- kubernetes/typed/settings/v1alpha1/fake
- kubernetes/typed/storage/v1
- kubernetes/typed/storage/v1/fake
- kubernetes/typed/storage/v1alpha1
- kubernetes/typed/storage/v1alpha1/fake
- kubernetes/typed/storage/v1beta1
- kubernetes/typed/storage/v1beta1/fake
- pkg/apis/clientauthentication
- pkg/apis/clientauthentication/v1alpha1
- pkg/apis/clientauthentication/v1beta1
- pkg/version
- plugin/pkg/client/auth/exec
- rest
- rest/watch
- testing
- tools/auth
- tools/cache
- tools/clientcmd
- tools/clientcmd/api
- tools/clientcmd/api/latest
- tools/clientcmd/api/v1
- tools/metrics
- tools/pager
- tools/reference
- transport
- util/buffer
- util/cert
- util/connrotation
- util/flowcontrol
- util/homedir
- util/integer
- util/retry
- util/workqueue
- name: k8s.io/kube-openapi
version: 91cfa479c814065e420cee7ed227db0f63a5854e
subpackages:
- pkg/util/proto
testImports: []

View File

@@ -1,10 +0,0 @@
package: github.com/stakater/Reloader
import:
- package: github.com/openshift/client-go
version: release-3.11
- package: github.com/spf13/cobra
version: f62e98d28ab7ad31d707ba837a966378465c7b57
- package: github.com/openshift/library-go
version: release-3.11
- package: github.com/openshift/api
version: master

44
go.mod Normal file
View File

@@ -0,0 +1,44 @@
module github.com/stakater/Reloader
go 1.15
require (
github.com/argoproj/argo-rollouts v1.0.1
github.com/onsi/ginkgo v1.15.1 // indirect
github.com/onsi/gomega v1.11.0 // indirect
github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
github.com/prometheus/client_golang v1.10.0
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.3
k8s.io/api v0.21.1
k8s.io/apimachinery v0.21.1
k8s.io/client-go v0.21.1
)
replace (
k8s.io/api => k8s.io/api v0.20.4
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
k8s.io/apiserver => k8s.io/apiserver v0.20.4
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
k8s.io/client-go => k8s.io/client-go v0.20.4
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
k8s.io/component-base => k8s.io/component-base v0.20.4
k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
k8s.io/kubectl => k8s.io/kubectl v0.20.4
k8s.io/kubelet => k8s.io/kubelet v0.20.4
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
k8s.io/metrics => k8s.io/metrics v0.20.4
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
)

1595
go.sum Normal file

File diff suppressed because it is too large

View File

@@ -1,14 +1,15 @@
package callbacks
import (
"context"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
apps_v1beta1 "k8s.io/api/apps/v1beta1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
openshiftv1 "github.com/openshift/api/apps/v1"
)
@@ -27,9 +28,17 @@ type VolumesFunc func(interface{}) []v1.Volume
//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
ItemsFunc ItemsFunc
AnnotationsFunc AnnotationsFunc
PodAnnotationsFunc PodAnnotationsFunc
ContainersFunc ContainersFunc
InitContainersFunc InitContainersFunc
UpdateFunc UpdateFunc
@@ -39,7 +48,7 @@ type RollingUpgradeFuncs struct {
// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
deployments, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deployments %v", err)
}
@@ -48,7 +57,7 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
daemonSets, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list daemonSets %v", err)
}
@@ -57,7 +66,7 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
statefulSets, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list statefulSets %v", err)
}
@@ -66,26 +75,85 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
deploymentConfigs, err := clients.OpenshiftAppsClient.Apps().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
}
return util.InterfaceSlice(deploymentConfigs.Items)
}
// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list Rollouts %v", err)
}
return util.InterfaceSlice(rollouts.Items)
}
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).ObjectMeta.Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).ObjectMeta.Annotations
}
// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}
// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
return item.(v1beta1.Deployment).Spec.Template.Spec.Containers
return item.(appsv1.Deployment).Spec.Template.Spec.Containers
}
// GetDaemonSetContainers returns the containers of given daemonset
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
return item.(v1beta1.DaemonSet).Spec.Template.Spec.Containers
return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
}
// GetStatefulsetContainers returns the containers of given statefulSet
func GetStatefulsetContainers(item interface{}) []v1.Container {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Containers
// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
}
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
@@ -93,19 +161,24 @@ func GetDeploymentConfigContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}
// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}
// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
return item.(v1beta1.Deployment).Spec.Template.Spec.InitContainers
return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonset
// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
return item.(v1beta1.DaemonSet).Spec.Template.Spec.InitContainers
return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}
// GetStatefulsetInitContainers returns the containers of given statefulSet
func GetStatefulsetInitContainers(item interface{}) []v1.Container {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.InitContainers
// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
@@ -113,50 +186,70 @@ func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}
// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
deployment := resource.(v1beta1.Deployment)
_, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
deployment := resource.(appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
daemonSet := resource.(v1beta1.DaemonSet)
_, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
daemonSet := resource.(appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateStatefulset performs rolling upgrade on statefulSet
func UpdateStatefulset(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(apps_v1beta1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
rollout := resource.(argorolloutv1alpha1.Rollout)
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item interface{}) []v1.Volume {
return item.(v1beta1.Deployment).Spec.Template.Spec.Volumes
return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
}
// GetDaemonSetVolumes returns the Volumes of given daemonset
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
return item.(v1beta1.DaemonSet).Spec.Template.Spec.Volumes
return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
}
// GetStatefulsetVolumes returns the Volumes of given statefulSet
func GetStatefulsetVolumes(item interface{}) []v1.Volume {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Volumes
// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
}
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}
// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}

View File

@@ -1,11 +1,14 @@
package cmd
import (
"errors"
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -21,17 +24,36 @@ func NewReloaderCommand() *cobra.Command {
}
// options
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in configmaps or secrets automatically")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
return cmd
}
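Aside (illustrative, not part of this changeset): the flags registered above can be exercised from a test in the same package; the flag names come from this file, everything else is made up:
// Parse the new flags without starting the controllers.
reloaderCmd := NewReloaderCommand()
reloaderCmd.SetArgs([]string{
	"--log-format=json",
	"--resources-to-ignore=secrets",
	"--namespaces-to-ignore=kube-system,kube-public",
	"--is-Argo-Rollouts=true",
})
// reloaderCmd.Execute() would then parse these flags and invoke startReloader.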
func configureLogging(logFormat string) error {
switch logFormat {
case "json":
logrus.SetFormatter(&logrus.JSONFormatter{})
default:
// just let the library use default on empty string.
if logFormat != "" {
return fmt.Errorf("unsupported logging formatter: %q", logFormat)
}
}
return nil
}
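In short (illustrative, not part of this changeset), configureLogging behaves as follows:
// configureLogging("json") -> switches logrus to the JSON formatter
// configureLogging("")     -> keeps the default text formatter
// configureLogging("xml")  -> returns an error: unsupported logging formatter: "xml"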
func startReloader(cmd *cobra.Command, args []string) {
var ignoreList util.List
var err error
err := configureLogging(options.LogFormat)
if err != nil {
logrus.Warn(err)
}
logrus.Info("Starting Reloader")
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
@@ -46,27 +68,24 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
ignoreList, err = cmd.Flags().GetStringSlice("resources-to-ignore")
ignoredResourcesList, err := getIgnoredResourcesList(cmd)
if err != nil {
logrus.Fatal(err)
}
for _, v := range ignoreList {
if v != "configMaps" && v != "secrets" {
logrus.Fatalf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
}
ignoredNamespacesList, err := getIgnoredNamespacesList(cmd)
if err != nil {
logrus.Fatal(err)
}
if len(ignoreList) > 1 {
logrus.Fatal("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
}
collectors := metrics.SetupPrometheusEndpoint()
for k := range kube.ResourceMap {
if ignoreList.Contains(k) {
if ignoredResourcesList.Contains(k) {
continue
}
c, err := controller.NewController(clientset, k, currentNamespace)
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -81,3 +100,36 @@ func startReloader(cmd *cobra.Command, args []string) {
// Wait forever
select {}
}
func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}
func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
slice, err := cmd.Flags().GetStringSlice(flag)
if err != nil {
return nil, err
}
return slice, nil
}
func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {
ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
if err != nil {
return nil, err
}
for _, v := range ignoredResourcesList {
if v != "configMaps" && v != "secrets" {
return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
}
}
if len(ignoredResourcesList) > 1 {
return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
}
return ignoredResourcesList, nil
}
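Summarised (illustrative, not part of this changeset), the flag validation works like this:
// --resources-to-ignore=configMaps           -> List{"configMaps"}, nil
// --resources-to-ignore=secrets              -> List{"secrets"}, nil
// --resources-to-ignore=pods                 -> error: only 'configMaps' or 'secrets'
// --resources-to-ignore=configMaps,secrets   -> error: not both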

View File

@@ -6,6 +6,8 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/handler"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/runtime"
@@ -13,24 +15,29 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
v1 "k8s.io/api/core/v1"
)
// Controller for checking events
type Controller struct {
client kubernetes.Interface
indexer cache.Indexer
queue workqueue.RateLimitingInterface
informer cache.Controller
namespace string
client kubernetes.Interface
indexer cache.Indexer
queue workqueue.RateLimitingInterface
informer cache.Controller
namespace string
ignoredNamespaces util.List
collectors metrics.Collectors
}
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string) (*Controller, error) {
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
c := Controller{
client: client,
namespace: namespace,
client: client,
namespace: namespace,
ignoredNamespaces: ignoredNamespaces,
}
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
@@ -44,22 +51,35 @@ func NewController(
c.indexer = indexer
c.informer = informer
c.queue = queue
c.collectors = collectors
return &c, nil
}
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
})
// Not required, as Reloader should update resources only when they change, not when they are created.
// Handling creations caused Reloader to reload pods whenever it was restarted, since its queue got filled with all existing k8s objects as newly created resources.
}
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
switch object := raw.(type) {
case *v1.ConfigMap:
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
case *v1.Secret:
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
}
return false
}
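A one-line sketch of the new namespace filter (illustrative; the table-driven test later in this changeset covers it properly), reusing testutil.GetConfigmap from the test helpers:
// A ConfigMap living in an ignored namespace is skipped by Update:
c := &Controller{ignoredNamespaces: util.List{"kube-system"}}
skip := c.resourceInIgnoredNamespace(testutil.GetConfigmap("kube-system", "testcm", "test")) // true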
// Update function to add an old object and a new object to the queue in case of updating a resource
func (c *Controller) Update(old interface{}, new interface{}) {
c.queue.Add(handler.ResourceUpdatedHandler{
Resource: new,
OldResource: old,
})
if !c.resourceInIgnoredNamespace(new) {
c.queue.Add(handler.ResourceUpdatedHandler{
Resource: new,
OldResource: old,
Collectors: c.collectors,
})
}
}
// Delete function to add an object to the queue in case of deleting a resource
@@ -125,7 +145,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
// This controller retries 5 times if something goes wrong. After that, it stops trying.
if c.queue.NumRequeues(key) < 5 {
logrus.Errorf("Error syncing events %v: %v", key, err)
logrus.Errorf("Error syncing events: %v", err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.

View File

@@ -5,6 +5,8 @@ import (
"testing"
"time"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/handler"
@@ -12,6 +14,9 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
var (
@@ -22,6 +27,11 @@ var (
data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
collectors = metrics.NewCollectors()
)
const (
sleepDuration = 3 * time.Second
)
func TestMain(m *testing.M) {
@@ -30,7 +40,7 @@ func TestMain(m *testing.M) {
logrus.Infof("Creating controller")
for k := range kube.ResourceMap {
c, err := NewController(clients.KubernetesClient, k, namespace)
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -40,7 +50,7 @@ func TestMain(m *testing.M) {
defer close(stop)
go c.Run(1, stop)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
logrus.Infof("Running Testcases")
retCode := m.Run()
@@ -90,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
if !updated {
t.Errorf("DeploymentConfig was not updated")
}
time.Sleep(5 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
@@ -103,7 +113,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -142,7 +152,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -155,7 +165,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -194,7 +204,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -207,12 +217,15 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon creating the configmap
func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// TODO: Fix this test case
t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")
// Creating configmap
configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5)
_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -232,14 +245,14 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com")
if err != nil {
t.Errorf("Error while creating the configmap second time %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
@@ -255,7 +268,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -268,7 +281,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and update env var upon updating the configmap
@@ -314,7 +327,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -327,11 +340,11 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not perform rolling upgrade on deployment or create env var upon updating the configmap labels
func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating configmap
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -365,7 +378,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
if updated {
t.Errorf("Deployment should not be updated by changing label")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -378,11 +391,15 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create an env var upon creating the secret
func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// TODO: Fix this test case
t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")
// Creating secret
secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5)
_, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -401,14 +418,14 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData)
if err != nil {
t.Errorf("Error in secret creation: %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
@@ -420,7 +437,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
Annotation: options.SecretUpdateOnChangeAnnotation,
}
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
@@ -437,7 +454,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -487,7 +504,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and update env var upon updating the secret
@@ -543,11 +560,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not perform rolling upgrade on pod or create/update an env var upon updating the secret labels
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -592,7 +609,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap
@@ -630,7 +647,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -643,7 +660,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap
@@ -667,7 +684,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
t.Errorf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Updating configmap for second time
updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io")
@@ -675,7 +692,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
t.Errorf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been updated")
@@ -691,7 +708,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting DaemonSet
err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -704,7 +721,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -754,7 +771,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on DaemonSet and update env var upon updating the secret
@@ -777,7 +794,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
if err != nil {
t.Errorf("Error while updating secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Updating Secret
err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData)
@@ -811,11 +828,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Do not perform rolling upgrade on pod or create/update an env var upon updating the secret labels
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -860,7 +877,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap
@@ -898,7 +915,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -911,7 +928,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap
@@ -955,7 +972,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
// Deleting StatefulSet
err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -968,7 +985,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on pod and create an env var upon updating the secret
@@ -1018,7 +1035,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on StatefulSet and update env var upon updating the secret
@@ -1074,5 +1091,89 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
time.Sleep(3 * time.Second)
time.Sleep(sleepDuration)
}
func TestController_resourceInIgnoredNamespace(t *testing.T) {
type fields struct {
client kubernetes.Interface
indexer cache.Indexer
queue workqueue.RateLimitingInterface
informer cache.Controller
namespace string
ignoredNamespaces util.List
}
type args struct {
raw interface{}
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnTrue",
fields: fields{
ignoredNamespaces: util.List{
"system",
},
},
args: args{
raw: testutil.GetConfigmap("system", "testcm", "test"),
},
want: true,
},
{
name: "TestSecretResourceInIgnoredNamespaceShouldReturnTrue",
fields: fields{
ignoredNamespaces: util.List{
"system",
},
},
args: args{
raw: testutil.GetSecret("system", "testsecret", "test"),
},
want: true,
},
{
name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse",
fields: fields{
ignoredNamespaces: util.List{
"system",
},
},
args: args{
raw: testutil.GetConfigmap("some-other-namespace", "testcm", "test"),
},
want: false,
},
{
name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse",
fields: fields{
ignoredNamespaces: util.List{
"system",
},
},
args: args{
raw: testutil.GetSecret("some-other-namespace", "testsecret", "test"),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Controller{
client: tt.fields.client,
indexer: tt.fields.indexer,
queue: tt.fields.queue,
informer: tt.fields.informer,
namespace: tt.fields.namespace,
ignoredNamespaces: tt.fields.ignoredNamespaces,
}
if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want {
t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -2,13 +2,15 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
)
// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
Resource interface{}
Resource interface{}
Collectors metrics.Collectors
}
// Handle processes the newly created resource
@@ -18,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
} else {
config, _ := r.GetConfig()
// process resource based on its type
doRollingUpgrade(config)
return doRollingUpgrade(config, r.Collectors)
}
return nil
}

View File

@@ -2,6 +2,7 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
)
@@ -10,6 +11,7 @@ import (
type ResourceUpdatedHandler struct {
Resource interface{}
OldResource interface{}
Collectors metrics.Collectors
}
// Handle processes the updated resource
@@ -20,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
// process resource based on its type
doRollingUpgrade(config)
return doRollingUpgrade(config, r.Collectors)
}
}
return nil
@@ -31,7 +33,7 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
var oldSHAData string
var config util.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)

View File

@@ -4,9 +4,11 @@ import (
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -17,6 +19,8 @@ import (
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentItems,
AnnotationsFunc: callbacks.GetDeploymentAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
ContainersFunc: callbacks.GetDeploymentContainers,
InitContainersFunc: callbacks.GetDeploymentInitContainers,
UpdateFunc: callbacks.UpdateDeployment,
@@ -29,6 +33,8 @@ func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDaemonSetItems,
AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
ContainersFunc: callbacks.GetDaemonSetContainers,
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
@@ -41,10 +47,12 @@ func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
InitContainersFunc: callbacks.GetStatefulsetInitContainers,
UpdateFunc: callbacks.UpdateStatefulset,
VolumesFunc: callbacks.GetStatefulsetVolumes,
AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
ContainersFunc: callbacks.GetStatefulSetContainers,
InitContainersFunc: callbacks.GetStatefulSetInitContainers,
UpdateFunc: callbacks.UpdateStatefulSet,
VolumesFunc: callbacks.GetStatefulSetVolumes,
ResourceType: "StatefulSet",
}
}
@@ -53,6 +61,8 @@ func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentConfigItems,
AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
ContainersFunc: callbacks.GetDeploymentConfigContainers,
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
UpdateFunc: callbacks.UpdateDeploymentConfig,
@@ -61,34 +71,78 @@ func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func doRollingUpgrade(config util.Config) {
clients := kube.GetClients()
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs())
if kube.IsOpenshift {
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs())
// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetRolloutItems,
AnnotationsFunc: callbacks.GetRolloutAnnotations,
PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
ContainersFunc: callbacks.GetRolloutContainers,
InitContainersFunc: callbacks.GetRolloutInitContainers,
UpdateFunc: callbacks.UpdateRollout,
VolumesFunc: callbacks.GetRolloutVolumes,
ResourceType: "Rollout",
}
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
clients := kube.GetClients()
err := PerformRollingUpgrade(clients, config, upgradeFuncs)
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
if kube.IsOpenshift {
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
return nil
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
return err
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
var err error
for _, i := range items {
// find correct annotation and update the resource
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
annotations := upgradeFuncs.AnnotationsFunc(i)
annotationValue, found := annotations[config.Annotation]
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
if !found && !foundAuto && !foundSearchAnn {
annotations = upgradeFuncs.PodAnnotationsFunc(i)
annotationValue = annotations[config.Annotation]
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
}
result := constants.NotUpdated
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
if err == nil && reloaderEnabled {
@@ -98,6 +152,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
result = updateContainers(upgradeFuncs, i, config, false)
if result == constants.Updated {
@@ -107,28 +162,59 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
}
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
result = updateContainers(upgradeFuncs, i, config, true)
}
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
resourceName := util.ToObjectMeta(i).Name
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
return err
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
}
}
}
return err
return nil
}
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
for i := range volumes {
if mountType == constants.ConfigmapEnvVarPostfix && volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
} else if mountType == constants.SecretEnvVarPostfix && volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
if mountType == constants.ConfigmapEnvVarPostfix {
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName {
return volumes[i].Name
}
}
}
} else if mountType == constants.SecretEnvVarPostfix {
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName {
return volumes[i].Name
}
}
}
}
}
return ""
}
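For context (illustrative, not part of this changeset): with the projected-volume handling above, a volume of the following shape now resolves to its mount name when the referenced ConfigMap is reloaded; the object names are made up:
vol := v1.Volume{
	Name: "combined-config",
	VolumeSource: v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			Sources: []v1.VolumeProjection{
				{ConfigMap: &v1.ConfigMapProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
				}},
			},
		},
	},
}
// getVolumeMountName([]v1.Volume{vol}, constants.ConfigmapEnvVarPostfix, "app-config") returns "combined-config".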
@@ -212,7 +298,7 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
if container == nil {
@@ -220,12 +306,12 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
}
//update if env var exists
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
// if no env var exists yet, let's create one
if result == constants.NoEnvVarFound {
e := v1.EnvVar{
Name: envar,
Name: envVar,
Value: config.SHAValue,
}
container.Env = append(container.Env, e)
@@ -234,11 +320,11 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
return result
}
func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
if envs[j].Value != shaData {
envs[j].Value = shaData
return constants.Updated

View File

@@ -1,32 +1,45 @@
package handler
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testclient "k8s.io/client-go/kubernetes/fake"
)
var (
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
projectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
projectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
projectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
projectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
configmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
configmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
configmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
@@ -60,6 +73,30 @@ func setup() {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap to be used in projected volume
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret to be used in projected volume
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap to be used in projected volume in init containers
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret to be used in projected volume in init containers
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
@@ -104,6 +141,11 @@ func setup() {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
@@ -116,6 +158,30 @@ func setup() {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedConfigMapWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with secret in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedSecretWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
if err != nil {
@@ -164,6 +230,17 @@ func setup() {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating Deployment with search annotation
_, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
clients.KubernetesClient,
configmapAnnotated,
namespace,
map[string]string{"reloader.stakater.com/search": "true"},
)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
@@ -176,6 +253,18 @@ func setup() {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with configmap in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
}
// Creating DaemonSet with secret in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with env var source as configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -200,6 +289,18 @@ func setup() {
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
}
// Creating StatefulSet with configmap in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with secret in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with env var source as configmap
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -212,6 +313,17 @@ func setup() {
logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err)
}
// Creating Deployment with pod annotations
_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithPodAnnotations, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with pod annotations: %v", err)
}
// Creating Deployment with both annotations
_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithBothAnnotations, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with both annotations: %v", err)
}
}
func teardown() {
@@ -227,6 +339,30 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap as env var source
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
if deploymentError != nil {
@@ -275,6 +411,24 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
}
// Deleting Deployment with pod annotations
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError)
}
// Deleting Deployment with both annotations
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithBothAnnotations)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError)
}
// Deleting Deployment with search annotation
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapAnnotated)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError)
}
// Deleting DaemonSet with configmap
daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
if daemonSetError != nil {
@@ -287,6 +441,18 @@ func teardown() {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting DaemonSet with configmap in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
}
// Deleting DaemonSet with secret in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedSecretName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting Deployment with configmap as env var source
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if daemonSetError != nil {
@@ -311,6 +477,18 @@ func teardown() {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
}
// Deleting StatefulSet with secret in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedSecretName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap as env var source
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if statefulSetError != nil {
@@ -335,6 +513,30 @@ func teardown() {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Secret used in projected volume
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretName)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume in init containers
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting secret used in projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting Configmap used as env var source
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
if err != nil {
@@ -383,6 +585,11 @@ func teardown() {
logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err)
}
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
if err != nil {
logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err)
}
// Deleting namespace
testutil.DeleteNamespace(namespace, clients.KubernetesClient)
@@ -398,12 +605,20 @@ func getConfigWithAnnotations(resourceType string, name string, shaData string,
}
}
func getCollectors() metrics.Collectors {
return metrics.NewCollectors()
}
var labelSucceeded = prometheus.Labels{"success": "true"}
var labelFailed = prometheus.Labels{"success": "false"}
func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -414,14 +629,132 @@ func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// Un-used function
// func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
// configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
// configmapObj.Annotations = annotations
// return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
// }
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggers(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
time.Sleep(5 * time.Second)
if updated {
t.Errorf("Deployment was updated unexpectedly")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
t.Errorf("Counter was increased unexpectedly")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t *testing.T) {
deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
clients.KubernetesClient,
configmapAnnotated+"-different",
namespace,
map[string]string{"reloader.stakater.com/search": "true"},
)
if err != nil {
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if updated {
t.Errorf("Deployment was updated unexpectedly")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
t.Errorf("Counter was increased unexpectedly")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -432,14 +765,42 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -450,14 +811,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitEnv, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -468,14 +834,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *test
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvFromName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -486,14 +857,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -504,14 +880,42 @@ func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -522,14 +926,42 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -540,14 +972,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -558,14 +995,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -576,14 +1018,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
@@ -594,14 +1041,42 @@ func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
@@ -612,14 +1087,19 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
@@ -630,14 +1110,42 @@ func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
@@ -648,14 +1156,42 @@ func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
@@ -666,4 +1202,94 @@ func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithPodAnnotations(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithPodAnnotations, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with pod annotations")
}
logrus.Infof("Verifying deployment update")
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + constants.ConfigmapEnvVarPostfix
items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
var foundPod, foundBoth bool
for _, i := range items {
name := util.ToObjectMeta(i).Name
if name == configmapWithPodAnnotations {
containers := deploymentFuncs.ContainersFunc(i)
updated := testutil.GetResourceSHA(containers, envName)
if updated != config.SHAValue {
t.Errorf("Deployment was not updated")
}
foundPod = true
}
if name == configmapWithBothAnnotations {
containers := deploymentFuncs.ContainersFunc(i)
updated := testutil.GetResourceSHA(containers, envName)
if updated == config.SHAValue {
t.Errorf("Deployment was updated")
}
foundBoth = true
}
}
if !foundPod {
t.Errorf("Deployment with pod annotations was not found")
}
if !foundBoth {
t.Errorf("Deployment with both annotations was not found")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestFailedRollingUpgrade(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "fail.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ interface{}) error {
return fmt.Errorf("error")
}
collectors := getCollectors()
_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 {
t.Errorf("Counter was not increased")
}
}

View File

@@ -0,0 +1,43 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"net/http"
)
type Collectors struct {
Reloaded *prometheus.CounterVec
}
func NewCollectors() Collectors {
reloaded := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "reloader",
Name: "reload_executed_total",
Help: "Counter of reloads executed by Reloader.",
},
[]string{"success"},
)
//set 0 as default value
reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
reloaded.With(prometheus.Labels{"success": "false"}).Add(0)
return Collectors{
Reloaded: reloaded,
}
}
func SetupPrometheusEndpoint() Collectors {
collectors := NewCollectors()
prometheus.MustRegister(collectors.Reloaded)
go func() {
http.Handle("/metrics", promhttp.Handler())
logrus.Fatal(http.ListenAndServe(":9090", nil))
}()
return collectors
}
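The counter above is what the upgrade tests assert on through promtestutil.ToFloat64. As a minimal usage sketch, assuming a helper living in this same metrics package (recordReload is an illustrative name, not part of this change):
func recordReload(collectors Collectors, err error) {
	// Bump reloader_reload_executed_total with the matching "success" label;
	// these are the same label values the tests read back above.
	if err != nil {
		collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
		return
	}
	collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
}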

View File

@@ -1,10 +1,22 @@
package options
var (
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
// configmaps specified by name
ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
// secrets specified by name
SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
// ReloaderAutoAnnotation is an annotation to detect changes in secrets and configmaps
ReloaderAutoAnnotation = "reloader.stakater.com/auto"
// AutoSearchAnnotation is an annotation to detect changes in
// configmaps or secrets tagged with the SearchMatchAnnotation
AutoSearchAnnotation = "reloader.stakater.com/search"
// SearchMatchAnnotation is an annotation to tag configmaps or secrets to be
// found via the AutoSearchAnnotation
SearchMatchAnnotation = "reloader.stakater.com/match"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
// IsArgoRollouts enables support for Argo Rollouts when set to "true"
IsArgoRollouts = "false"
)
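The search/match pair added above is what the ...ViaSearchAnnotation tests exercise: a workload opts in with reloader.stakater.com/search set to "true", and only configmaps or secrets carrying reloader.stakater.com/match set to "true" trigger a reload for it. A minimal sketch of that matching rule, assuming an illustrative helper in this package (matchesSearch is not part of the change):
// matchesSearch returns true only when the workload opted in via the search
// annotation and the changed resource is tagged with the match annotation.
func matchesSearch(workloadAnnotations, resourceAnnotations map[string]string) bool {
	return workloadAnnotations[AutoSearchAnnotation] == "true" &&
		resourceAnnotations[SearchMatchAnnotation] == "true"
}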

View File

@@ -1,6 +1,7 @@
package testutil
import (
"context"
"math/rand"
"sort"
"strconv"
@@ -16,9 +17,8 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1_beta1 "k8s.io/api/apps/v1beta1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -34,7 +34,7 @@ var (
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
if err != nil {
logrus.Fatalf("Failed to create namespace for testing %v", err)
} else {
@@ -44,7 +44,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
if err != nil {
logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
} else {
@@ -93,6 +93,38 @@ func getEnvVarSources(name string) []v1.EnvFromSource {
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
Name: "projectedconfigmap",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "projectedsecret",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
@@ -124,6 +156,14 @@ func getVolumeMounts(name string) []v1.VolumeMount {
MountPath: "etc/sec",
Name: "secret",
},
{
MountPath: "etc/projectedconfig",
Name: "projectedconfigmap",
},
{
MountPath: "etc/projectedsec",
Name: "projectedsecret",
},
}
}
@@ -272,14 +312,17 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
}
// GetDeployment provides deployment for testing
func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithVolumes(deploymentName),
},
@@ -303,14 +346,17 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
}
// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *v1beta1.Deployment {
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainer(deploymentName),
},
@@ -318,28 +364,34 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *v1
}
// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *v1beta1.Deployment {
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName),
},
}
}
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *v1beta1.Deployment {
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(deploymentName),
},
@@ -361,39 +413,70 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
}
}
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *v1beta1.Deployment {
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: v1beta1.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
}
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
replicaset := int32(1)
deployment := &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
if !both {
deployment.ObjectMeta.Annotations = nil
}
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true)
return deployment
}
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
return &v1beta1.DaemonSet{
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonsetName, false),
Spec: v1beta1.DaemonSetSpec{
UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.RollingUpdateDaemonSetStrategyType,
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
},
Template: getPodTemplateSpecWithVolumes(daemonsetName),
},
}
}
func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *v1beta1.DaemonSet {
return &v1beta1.DaemonSet{
func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonSetName, true),
Spec: v1beta1.DaemonSetSpec{
UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.RollingUpdateDaemonSetStrategyType,
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(daemonSetName),
},
@@ -401,12 +484,15 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *v1beta1.Da
}
// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
return &v1_beta1.StatefulSet{
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, false),
Spec: v1_beta1.StatefulSetSpec{
UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Template: getPodTemplateSpecWithVolumes(statefulsetName),
},
@@ -414,12 +500,15 @@ func GetStatefulSet(namespace string, statefulsetName string) *v1_beta1.Stateful
}
// GetStatefulSetWithEnvVar provides statefulset with env vars for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
return &v1_beta1.StatefulSet{
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, true),
Spec: v1_beta1.StatefulSetSpec{
UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(statefulsetName),
},
@@ -475,11 +564,11 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}
// GetResourceSHA returns the SHA value of given environment variable
func GetResourceSHA(containers []v1.Container, envar string) string {
func GetResourceSHA(containers []v1.Container, envVar string) string {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
return envs[j].Value
}
}
@@ -509,7 +598,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
logrus.Infof("Creating configmap")
configmapClient := client.CoreV1().ConfigMaps(namespace)
_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return configmapClient, err
}
@@ -518,22 +607,22 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
_, err := secretClient.Create(GetSecret(namespace, secretName, data))
_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return secretClient, err
}
// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*v1beta1.Deployment, error) {
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
var deploymentObj *v1beta1.Deployment
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeployment(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -548,62 +637,85 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
time.Sleep(5 * time.Second)
return deploymentConfig, err
}
// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*v1beta1.Deployment, error) {
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
var deploymentObj *v1beta1.Deployment
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithEnvVarSourceAndAnnotations creates a deployment with the given
// annotations in the given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDaemonSet creates a daemonset in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*v1beta1.DaemonSet, error) {
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) {
logrus.Infof("Creating DaemonSet")
daemonsetClient := client.ExtensionsV1beta1().DaemonSets(namespace)
var daemonsetObj *v1beta1.DaemonSet
daemonsetClient := client.AppsV1().DaemonSets(namespace)
var daemonsetObj *appsv1.DaemonSet
if volumeMount {
daemonsetObj = GetDaemonSet(namespace, daemonsetName)
} else {
daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
}
daemonset, err := daemonsetClient.Create(daemonsetObj)
daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return daemonset, err
}
// CreateStatefulSet creates a statefulset in given namespace and returns the StatefulSet
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*v1_beta1.StatefulSet, error) {
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*appsv1.StatefulSet, error) {
logrus.Infof("Creating StatefulSet")
statefulsetClient := client.AppsV1beta1().StatefulSets(namespace)
var statefulsetObj *v1_beta1.StatefulSet
statefulsetClient := client.AppsV1().StatefulSets(namespace)
var statefulsetObj *appsv1.StatefulSet
if volumeMount {
statefulsetObj = GetStatefulSet(namespace, statefulsetName)
} else {
statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
}
statefulset, err := statefulsetClient.Create(statefulsetObj)
statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return statefulset, err
}
@@ -611,7 +723,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
deploymentError := client.ExtensionsV1beta1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentError
}
@@ -619,7 +731,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
logrus.Infof("Deleting DeploymentConfig")
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentConfigError
}
@@ -627,7 +739,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
daemonsetError := client.ExtensionsV1beta1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return daemonsetError
}
@@ -635,7 +747,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet deletes a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
logrus.Infof("Deleting StatefulSet %s", statefulsetName)
statefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return statefulsetError
}
@@ -649,7 +761,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
} else {
configmap = GetConfigmap(namespace, configmapName, data)
}
_, updateErr := configmapClient.Update(configmap)
_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -663,7 +775,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
} else {
secret = GetSecret(namespace, secretName, data)
}
_, updateErr := secretClient.Update(secret)
_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -671,7 +783,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -679,7 +791,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
logrus.Infof("Deleting secret %q.\n", secretName)
err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -701,6 +813,7 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
containers := upgradeFuncs.ContainersFunc(i)
// match resources with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
@@ -709,11 +822,16 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
matches = true
break
}
}
} else if searchAnnotationValue == "true" {
if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
matches = true
}
}
if matches {

View File

@@ -8,31 +8,34 @@ import (
//Config contains rolling upgrade configuration parameters
type Config struct {
Namespace string
ResourceName string
Annotation string
SHAValue string
Type string
Namespace string
ResourceName string
ResourceAnnotations map[string]string
Annotation string
SHAValue string
Type string
}
// GetConfigmapConfig provides utility config for configmap
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
return Config{
Namespace: configmap.Namespace,
ResourceName: configmap.Name,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
SHAValue: GetSHAfromConfigmap(configmap.Data),
Type: constants.ConfigmapEnvVarPostfix,
Namespace: configmap.Namespace,
ResourceName: configmap.Name,
ResourceAnnotations: configmap.Annotations,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
SHAValue: GetSHAfromConfigmap(configmap),
Type: constants.ConfigmapEnvVarPostfix,
}
}
// GetSecretConfig provides utility config for secret
func GetSecretConfig(secret *v1.Secret) Config {
return Config{
Namespace: secret.Namespace,
ResourceName: secret.Name,
Annotation: options.SecretUpdateOnChangeAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
Namespace: secret.Namespace,
ResourceName: secret.Name,
ResourceAnnotations: secret.Annotations,
Annotation: options.SecretUpdateOnChangeAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
}
}

View File

@@ -2,10 +2,12 @@ package util
import (
"bytes"
"encoding/base64"
"sort"
"strings"
"github.com/stakater/Reloader/internal/pkg/crypto"
v1 "k8s.io/api/core/v1"
)
// ConvertToEnvVarName converts the given text into a usable env var
@@ -29,11 +31,14 @@ func ConvertToEnvVarName(text string) string {
return buffer.String()
}
func GetSHAfromConfigmap(data map[string]string) string {
func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
values := []string{}
for k, v := range data {
for k, v := range configmap.Data {
values = append(values, k+"="+v)
}
for k, v := range configmap.BinaryData {
values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}
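With BinaryData now folded into the hash, two configmaps that share Data but differ only in BinaryData no longer produce the same SHA (the new unit test further below checks exactly this). A quick illustrative check, assuming the core/v1 import shown above:
a := &v1.ConfigMap{Data: map[string]string{"test": "test"}}
b := &v1.ConfigMap{
	Data:       map[string]string{"test": "test"},
	BinaryData: map[string][]byte{"bintest": []byte("test")},
}
// Under the old map[string]string signature both would hash identically;
// with the new signature the SHAs differ because BinaryData is included.
if GetSHAfromConfigmap(a) == GetSHAfromConfigmap(b) {
	panic("BinaryData did not affect the hash")
}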

View File

@@ -2,6 +2,8 @@ package util
import (
"testing"
v1 "k8s.io/api/core/v1"
)
func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
t.Errorf("Failed to convert data into environment variable")
}
}
func TestGetHashFromConfigMap(t *testing.T) {
data := map[*v1.ConfigMap]string{
{
Data: map[string]string{"test": "test"},
}: "Only Data",
{
Data: map[string]string{"test": "test"},
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Both Data and BinaryData",
{
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Only BinaryData",
}
converted := map[string]string{}
for cm, cmName := range data {
converted[cmName] = GetSHAfromConfigmap(cm)
}
// Test that the hash for each configmap is really unique
for cmName, cmHash := range converted {
count := 0
for _, cmHash2 := range converted {
if cmHash == cmHash2 {
count++
}
}
if count > 1 {
t.Errorf("Found duplicate hashes for %v", cmName)
}
}
}

14
okteto.yml Normal file
View File

@@ -0,0 +1,14 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
capabilities:
add:
- SYS_PTRACE
volumes:
- /go/pkg/
- /root/.cache/go-build/
sync:
- .:/app
forward:
- 2345:2345

View File

@@ -1,10 +1,12 @@
package kube
import (
"context"
"os"
"k8s.io/client-go/tools/clientcmd"
argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
@@ -15,6 +17,7 @@ import (
type Clients struct {
KubernetesClient kubernetes.Interface
OpenshiftAppsClient appsclient.Interface
ArgoRolloutClient argorollout.Interface
}
var (
@@ -38,18 +41,34 @@ func GetClients() Clients {
}
}
var rolloutClient *argorollout.Clientset
rolloutClient, err = GetArgoRolloutClient()
if err != nil {
logrus.Warnf("Unable to create ArgoRollout client error = %v", err)
}
return Clients{
KubernetesClient: client,
OpenshiftAppsClient: appsClient,
ArgoRolloutClient: rolloutClient,
}
}
func GetArgoRolloutClient() (*argorollout.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return argorollout.NewForConfig(config)
}
func isOpenshift() bool {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true
@@ -78,7 +97,6 @@ func GetKubernetesClient() (*kubernetes.Clientset, error) {
func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
@@ -95,9 +113,6 @@ func getConfig() (*rest.Config, error) {
return nil, err
}
}
if err != nil {
return nil, err
}
return config, nil
}