Compare commits

...

182 Commits

Author SHA1 Message Date
stakater-user
032c391cb8 [skip-ci] Update artifacts 2023-02-26 12:43:03 +00:00
Faizan Ahmad
9173f446ab Merge pull request #396 from stakater/update-push-workflow
Remove outdated manifests
2023-02-26 13:23:25 +01:00
stakater-user
f795fa2aec [skip-ci] Update artifacts 2023-02-26 12:07:53 +00:00
Faizan Ahmad
34c1f389bc Merge pull request #393 from stakater/dependabot/go_modules/golang.org/x/net-0.7.0
Bump golang.org/x/net from 0.5.0 to 0.7.0
2023-02-26 12:48:16 +01:00
stakater-user
fdc8a61fc6 [skip-ci] Update artifacts 2023-02-26 11:35:45 +00:00
Faizan Ahmad
c7f507a4b9 Merge pull request #386 from d3adb5/feat/set-rootfs-ro
feat: set read-only root filesystem at container level
2023-02-26 12:16:42 +01:00
stakater-user
70aef8a871 [skip-ci] Update artifacts 2023-02-26 10:59:11 +00:00
Faizan Ahmad
54d0681340 Merge pull request #385 from d3adb5/chore/stop-listening-on-9091
chore: listen on only 9090 for /metrics and /live
2023-02-26 11:39:45 +01:00
MahnoorAsghar
b279aabae3 Remove changes from push workflow 2023-02-24 13:50:48 +05:00
MahnoorAsghar
63022fe4d0 Remove optional manifests which are outdated 2023-02-24 13:48:38 +05:00
MahnoorAsghar
8c4523db69 update push workflow to render manifests correctly 2023-02-22 16:45:43 +05:00
dependabot[bot]
646c64a326 Bump golang.org/x/net from 0.5.0 to 0.7.0
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.5.0 to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/compare/v0.5.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-02-18 04:14:25 +00:00
d3adb5
5a9ccbf01f fix: properly capitalize 'filesystem' in values
Use the proper capitalization in the reference to the value
reloader.readOnlyRootFileSystem: FileSystem instead of Filesystem.
2023-02-08 14:15:28 -08:00
stakater-user
0f7403b7bf [skip-ci] Update artifacts 2023-02-07 17:35:32 +00:00
Faizan Ahmad
2bc83a26ff Merge pull request #383 from jkroepke/service-monitor
[helm] Add support for relabelings and metricRelabelings for serviceMonitor/podMonitor
2023-02-07 18:12:31 +01:00
Jan-Otto Kröpke
09babe46d9 [helm] Add support for relabelings and metricRelabelings for serviceMonitor/podMonitor 2023-02-07 11:20:29 +01:00
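For orientation, a minimal values.yaml sketch of the new fields, mirroring the commented examples the chart itself ships (see the values diff further down); the relabeling rules are illustrative only:

```yaml
reloader:
  serviceMonitor:
    enabled: true
    # Metric relabel configs applied to samples before ingestion (illustrative)
    metricRelabelings:
      - action: keep
        regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
        sourceLabels: [__name__]
    # Relabel configs applied to samples before ingestion (illustrative)
    relabelings:
      - sourceLabels: [__meta_kubernetes_pod_node_name]
        targetLabel: nodename
        action: replace
```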
d3adb5
451e4f636b feat: set read-only root filesystem at container level
Change the securityContext field of the Reloader container if
reloader.readOnlyRootFileSystem is set to true. The change takes effect even
if no container securityContext is defined.

Closes #339.
2023-02-07 00:16:16 -08:00
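For context, a minimal sketch under the chart's own wiring (see the deployment template and values diffs further down): setting the value flips `readOnlyRootFilesystem` in the container securityContext, and the chart also mounts an emptyDir at `/tmp/` so the container keeps a writable scratch path.

```yaml
# values.yaml (excerpt)
reloader:
  readOnlyRootFileSystem: true
```

Rendered effect on the container (sketch):

```yaml
securityContext:
  readOnlyRootFilesystem: true
volumeMounts:
  - mountPath: /tmp/
    name: tmp-volume
```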
d3adb5
2f8999e3cb chore: listen on only 9090 for /metrics and /live
Previously, 9091 and 9090 both led to the same web server, meaning both
/metrics and /live were reachable and fully functional through both.
This commit changes that so that only port 9090 is used for both.

Closes #381.
2023-02-07 00:15:17 -08:00
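The chart diff further down reflects this by pointing the liveness probe at the new `/live` path on the single HTTP port; a sketch of the rendered container ports and probe:

```yaml
ports:
  - name: http
    containerPort: 9090
livenessProbe:
  httpGet:
    path: /live   # previously /metrics; /metrics and /live are now both served on 9090
    port: http
```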
stakater-user
9463cd5fc2 [skip-ci] Update artifacts 2023-02-06 22:59:31 +00:00
Faizan Ahmad
5e2f4a0826 Merge pull request #391 from stakater/remove-depreciated-seed
Remove deprecated Seed
2023-02-06 23:37:53 +01:00
faizanahmad055
0083edb3ca Remove deprecated Seed
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 23:13:07 +01:00
Faizan Ahmad
d020c666b7 Merge pull request #390 from stakater/update-golangci-lint
Update golangci-lint version
2023-02-06 22:25:13 +01:00
faizanahmad055
3d29651267 Update golangci-lint version
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 22:22:36 +01:00
Faizan Ahmad
795aae0c78 Merge pull request #389 from stakater/update-pipeline
Update pipeline
2023-02-06 22:14:50 +01:00
faizanahmad055
364d66b90f Remove extra line
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 22:13:58 +01:00
faizanahmad055
2d8f0336dc Fix go version
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 22:13:16 +01:00
faizanahmad055
5716c1b35e Update pipeline
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 22:08:31 +01:00
Faizan Ahmad
fecd21deca Merge pull request #388 from stakater/update-github-actions
[skip-ci] Update github actions
2023-02-06 22:04:31 +01:00
faizanahmad055
5cd8b3d4ca [skip-ci] Update github actions
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-06 22:02:22 +01:00
Faizan Ahmad
991613bd13 Merge pull request #384 from stakater/update-go-1.20
Update golang version to 1.20
2023-02-05 17:17:43 +01:00
faizanahmad055
ef93197da1 Remove unused dependencies
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-05 16:10:34 +01:00
faizanahmad055
35754ccd73 Update dependencies
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-05 16:01:03 +01:00
faizanahmad055
d6d531e08e Update golang version to 1.20
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-02-05 15:11:23 +01:00
stakater-user
adff75f040 [skip-ci] Update artifacts 2023-01-30 14:39:35 +00:00
Faizan Ahmad
3e364186c9 Merge pull request #380 from Allex1/master
[helm] - Add template option to pod/servicemonitor ns/labels
2023-01-30 15:33:01 +01:00
birca
edb482d4ba remove version 2023-01-30 16:22:35 +02:00
birca
1f2d75898b [helm] - Add template option to pod/servicemonitor ns/labels 2023-01-27 13:09:47 +02:00
stakater-user
7f331907d3 [skip-ci] Update artifacts 2023-01-19 16:48:47 +00:00
Faizan Ahmad
29aa52a1c7 Merge pull request #377 from stakater/update-golang-dependencies
Update golang version and dependencies
2023-01-19 17:41:34 +01:00
faizanahmad055
ada8dbb5f3 Update golang version and dependencies
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-01-19 17:02:03 +01:00
Karl-Johan Grahn
cfe1754c44 Add dependabot (#371)
* Add dependabot

* update
2023-01-18 11:43:45 +01:00
stakater-user
2cfce5144b [skip-ci] Update artifacts 2023-01-12 20:29:14 +00:00
Faizan Ahmad
2fe863a054 Merge pull request #369 from stakater/create-release-1.0.0
Create release 1.0.0
2023-01-12 21:22:29 +01:00
faizanahmad055
3e01091d01 Create release 1.0.0
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2023-01-12 20:47:09 +01:00
stakater-user
7f85a8e53b [skip-ci] Update artifacts 2023-01-05 22:05:37 +00:00
Faizan Ahmad
c679157e24 Merge pull request #356 from avihuly/feature/namespace-selector
Namespace selector
2023-01-05 22:45:22 +01:00
avihuly
9e7b70964e Added wildcard label value option 2023-01-05 14:28:08 +02:00
avihuly
8ebbb476b2 Added logs on startup & get ns operation 2023-01-05 12:54:08 +02:00
avihuly
9263b812eb Added namespace get verb to reloader clusterrole 2023-01-05 12:51:41 +02:00
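This maps to the conditional RBAC rule added in the chart's role templates (see the clusterrole diff further down); the rendered rule, as a sketch:

```yaml
# Added only when reloader.namespaceSelector is set
- apiGroups:
    - ""
  resources:
    - namespaces
  verbs:
    - get
```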
stakater-user
f70dd52b2d [skip-ci] Update artifacts 2022-12-29 17:33:11 +00:00
Faizan Ahmad
e0a8f1ad04 Merge pull request #354 from joaoubaldo/support-imagepullsecrets
[Helm chart] Add support for ImagePullSecrets
2022-12-29 18:26:28 +01:00
stakater-user
45dac417cb [skip-ci] Update artifacts 2022-12-29 17:02:47 +00:00
Faizan Ahmad
1514c5bcd2 Merge pull request #366 from zv0n/master
Add option to use existing secret to Helm chart
2022-12-29 17:44:57 +01:00
Avi Huli
e7cfafd6d6 Handle empty selector & missing ns label key 2022-12-27 18:49:23 +02:00
Avi Huli
15d7263c95 Handle empty selector & missing ns label key 2022-12-27 18:44:59 +02:00
Avi Huli
de21a400ab Readme namespace selector 2022-12-22 14:28:15 +02:00
Avi Huly
e702610dc6 Readme 2022-12-19 14:35:28 +02:00
Avi Huly
481eeeffc4 Merge pull request #2 from stakater/master
Updates from master
2022-12-18 15:23:24 +02:00
Tomáš Zvoník
801e1dabed Add option to use existing secret to Helm chart 2022-12-16 13:55:50 +01:00
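A hedged values.yaml sketch of the new `env.existing` map (structure from the values diff further down; the secret name and key are placeholders):

```yaml
reloader:
  deployment:
    env:
      # For each existing secret, map an env var name to the key in that
      # secret used to populate it
      existing:
        my-existing-secret:            # placeholder secret name
          ALERT_WEBHOOK_URL: alert_webhook_key
```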
stakater-user
5c44c1e8f5 [skip-ci] Update artifacts 2022-12-07 15:46:37 +00:00
Tehreem
0ef6dcb510 Merge pull request #365 from stakater/make-namespace-configurable
Enable ability to override Release.Namespace for target namespace
2022-12-07 20:40:04 +05:00
Callum MacDonald
0ef5e75673 fix indentation 2022-12-07 09:18:15 +01:00
Callum MacDonald
e5f85ae37b Enable ability to override Release.Namespace for target namespace 2022-12-07 09:11:46 +01:00
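The change replaces hard-coded `.Release.Namespace` references with an overridable value; the template pattern now used across the namespaced manifests (taken from the diffs further down):

```yaml
metadata:
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
```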
stakater-user
5d0e9ca70b [skip-ci] Update artifacts 2022-12-01 06:34:43 +00:00
Faizan Ahmad
297baa08d5 Update helm version in pipeline (#364) 2022-12-01 07:15:22 +01:00
Faizan Ahmad
dd1433a7a9 Merge pull request #363 from stakater/update-log-level
Update log level
2022-12-01 00:08:09 +01:00
faizanahmad055
9875c416df Update log level to error and debug
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-11-30 22:43:06 +01:00
faizanahmad055
b414e3b350 Update log level
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-11-30 22:37:50 +01:00
faizanahmad055
e417e8bc12 Add error log
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-11-30 22:35:39 +01:00
faizanahmad055
aafe3365eb Update log level
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-11-30 22:34:02 +01:00
Faizan Ahmad
694baf715c Merge pull request #361 from AgustinRamiroDiaz/update-okteto-yaml
Upgrade okteto yaml manifest to v2
2022-11-30 22:31:15 +01:00
Faizan Ahmad
48b188d7b4 Merge pull request #362 from stakater/bump-go-version
Update go version and dependencies
2022-11-30 22:12:34 +01:00
faizanahmad055
d1cb53b65a Update go version and dependencies
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-11-30 21:31:59 +01:00
Agustin Ramiro Diaz
cefd633176 upgrade okteto yaml to v2
Signed-off-by: Agustin Ramiro Diaz <agustin.ramiro.diaz@gmail.com>
2022-11-25 09:36:27 -03:00
Avi Huly
08e6f81a15 Lint fix 2022-11-15 16:23:24 +02:00
Avi Huly
edbad45637 namespaceSelector chart value 2022-11-15 11:22:23 +02:00
Avi Huly
363fbd3b77 Test for namespace selector 2022-11-15 11:11:11 +02:00
Avi Huly
82ee3ef3d1 Added namespace-selector to helm chart deployment 2022-11-13 16:38:54 +02:00
Avi Huly
21502e2bb4 Added namespace selector functionality
Changes:
	modified:   internal/pkg/cmd/reloader.go
	modified:   internal/pkg/controller/controller.go
	modified:   internal/pkg/util/util.go
2022-11-12 23:00:56 +02:00
Joao Ubaldo
9d3b70d4d2 Add support for ImagePullSecrets 2022-11-03 10:02:18 +00:00
stakater-user
5662919f72 [skip-ci] Update artifacts 2022-10-10 10:56:58 +00:00
Faizan Ahmad
445d0f870e Merge pull request #345 from avestuk/podDisruptionBudget
Add pod disruption budget
2022-10-10 12:50:26 +02:00
stakater-user
81e74fe830 [skip-ci] Update artifacts 2022-10-10 09:03:48 +00:00
Faizan Ahmad
50791ad51a Merge pull request #341 from avestuk/leadership-election
Leadership election
2022-10-10 10:57:25 +02:00
Alex Vest
6a65657e27 Add pod disruption budget 2022-10-06 11:51:28 +01:00
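A hedged values sketch enabling the new budget (field names from the values and template diffs further down; the `minAvailable` value is illustrative and unset by default):

```yaml
reloader:
  podDisruptionBudget:
    enabled: true
    minAvailable: 1   # illustrative
```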
Alex Vest
1c7190884a Merge branch 'master' into leadership-election 2022-10-06 11:17:35 +01:00
stakater-user
e0fcc3bfa6 [skip-ci] Update artifacts 2022-10-04 18:37:34 +00:00
Faizan Ahmad
504b5a8eb0 Merge pull request #343 from junnplus/record-event
Support event recording
2022-10-04 20:30:45 +02:00
Alex Vest
488eaa9bef Run leadership election as non-blocking
The liveness probe endpoint will always be blocking on the main thread
2022-10-04 16:41:34 +01:00
Alex Vest
676c3703aa Set replicas = 1 by default, override if HA is enabled 2022-10-04 16:41:34 +01:00
Alex Vest
deec4df125 Fix pod antiaffinity 2022-10-04 16:41:34 +01:00
Alex Vest
eedc8e81d0 Set enableHA and reloadOnCreate to false 2022-10-04 16:41:34 +01:00
Alex Vest
28456ffafe Add PodAntiAffinity if HA is enabled 2022-10-04 16:41:34 +01:00
Alex Vest
a7c3ae37aa Expand documentation about reloadOnCreate 2022-10-04 16:41:34 +01:00
Alex Vest
d043bcf7be Fix roles 2022-10-04 16:41:34 +01:00
Alex Vest
72a1c59cac Err check response writer 2022-10-04 16:41:34 +01:00
Alex Vest
6299b1d8e9 Update helm chart with new liveness probe 2022-10-04 16:41:34 +01:00
Alex Vest
11ae057b0a Add tests for leadership election
Pull liveness into leadership to ease testing; logically, the liveness
probe is directly affected by leadership, so it makes sense here.

Moved some of the components of the controller tests into the testutil
package for reuse in my own tests.
2022-10-04 16:41:34 +01:00
Alex Vest
d34c99baf4 Add liveness probe 2022-10-04 16:41:34 +01:00
Alex Vest
b7e83b74d8 Move leadership to its own package 2022-10-04 16:41:34 +01:00
Alex Vest
919f75bb62 Shutdown on leader election loss 2022-10-04 16:41:34 +01:00
Alex Vest
16079bd1d4 Update helm chart for HA in global mode 2022-10-04 16:41:34 +01:00
Alex Vest
401d4227d1 Move consts to const pkg
Should move leadership bits to own pkg?
2022-10-04 16:41:34 +01:00
Alex Vest
7f9f32ca58 Add leadership election 2022-10-04 16:41:34 +01:00
Ye Sijun
6937b8120b Support event recording
Signed-off-by: Ye Sijun <junnplus@gmail.com>
2022-09-30 21:30:16 +09:00
stakater-user
be80ce35b2 [skip-ci] Update artifacts 2022-09-29 14:30:10 +00:00
Faizan Ahmad
99349ce361 Merge pull request #329 from kenske/patch-1
Remove server side call from podmonitoring template
2022-09-29 16:09:18 +02:00
stakater-user
3a1b808169 [skip-ci] Update artifacts 2022-09-27 22:01:25 +00:00
Faizan Ahmad
798079eb53 Merge pull request #327 from AnuragEkkati/master
Reloader Reporting and Alerting #320
2022-09-27 23:38:34 +02:00
Anurag Ekkati
98749f2c9b Fixing Merge failures 2022-09-26 20:24:00 -07:00
Anurag Ekkati
3a675696cd Merge branch 'master' into master 2022-09-26 20:12:20 -07:00
Anurag Ekkati
632eeaa527 Adding Alerting Docs 2022-09-26 19:33:07 -07:00
stakater-user
9c85ce404b [skip-ci] Update artifacts 2022-09-20 06:40:06 +00:00
Karl-Johan Grahn
f8f8afca88 Merge pull request #338 from krouser/master
extra space removed
2022-09-20 08:33:30 +02:00
Stanlee
11dc048709 extra space removed
removed extra space in the yaml
2022-09-14 18:24:33 +02:00
Kenneth
328442c121 Remove server side call from podmonitoring template
`.Capabilities.APIVersions.Has` is evaluated server-side, which means it breaks in workflows where `helm template` is used (ArgoCD, for example)
2022-08-12 11:27:28 -05:00
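The fix is simply to drop the server-side API check from the template guard (visible in the podmonitor diff further down):

```yaml
# Before: breaks under offline `helm template` (e.g. ArgoCD)
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
# After: gated on the chart value alone
{{- if ( .Values.reloader.podMonitor.enabled ) }}
```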
stakater-user
4304880b6b [skip-ci] Update artifacts 2022-07-15 06:10:21 +00:00
Tanveer Alam
aef99a7bb8 Merge pull request #328 from stakater/update-dep-version
Update go-reloader version to v3.8.0
2022-07-15 11:32:54 +05:30
hussnain612
e387b21ed6 Updated dependencies 2022-07-14 17:51:40 +05:00
hussnain612
b6de33d501 Updated replaced dependencies 2022-07-14 17:36:33 +05:00
MahnoorAsghar
48160e0414 Update go-reloader version 2022-07-14 15:22:05 +03:00
Anurag Ekkati
d998c1a19e Reloader Reporting and Alerting #320
Alert a webhook-supporting service (e.g. Slack) whenever Reloader reloads a
workload on change
2022-07-06 16:32:18 -07:00
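A hedged sketch of the environment variables the feature is configured through, as exposed via the chart's secret (names from the secret template and values diffs further down; the webhook URL is a placeholder):

```yaml
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"        # otherwise a raw text-based webhook is assumed
        ALERT_WEBHOOK_URL: "https://hooks.example.com/services/placeholder"
        ALERT_ADDITIONAL_INFO: "cluster name or similar"
```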
stakater-user
0b7ca82218 [skip-ci] Update artifacts 2022-06-20 07:26:00 +00:00
Faizan Ahmad
88fe843285 Merge pull request #311 from stakater/update-golang-version
Update golang version
2022-06-20 09:06:43 +02:00
faizanahmad055
cf31fed9d3 Merge branch 'master' of github.com:stakater/Reloader into update-golang-version 2022-06-18 23:56:53 +02:00
stakater-user
ba7f7537fb [skip-ci] Update artifacts 2022-06-18 21:55:13 +00:00
Faizan Ahmad
499ecf9da5 Merge pull request #314 from maxwell-jiang/reloader-update2
the specific configmap/secret name can be selected by regular expression
2022-06-18 23:33:52 +02:00
stakater-user
6188811b94 [skip-ci] Update artifacts 2022-06-18 21:29:26 +00:00
faizanahmad055
0f2395309f Fix readme
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-06-18 23:17:38 +02:00
faizanahmad055
41d4fa56ca Merge branch 'master' of github.com:stakater/Reloader into update-golang-version 2022-06-18 23:13:35 +02:00
Faizan Ahmad
bf21677357 Merge pull request #316 from scottd018/fix-cve-crypto-golang-version
fix: Fixes #315, upgraded version of crypto package to non-vulnerable…
2022-06-18 22:54:47 +02:00
stakater-user
c58d0965d5 [skip-ci] Update artifacts 2022-06-09 16:35:12 +00:00
Faizan Ahmad
ec1f7a68de Merge pull request #318 from daconstenla/patch-1
Set the namespace for the service
2022-06-09 18:11:53 +02:00
David Constenla
68a353d097 Set the namespace for the service
as is done for the other resources
2022-06-09 08:11:06 +02:00
Dustin Scott
c82886c921 fix: Fixes #315, upgraded version of crypto package to non-vulnerable version
As per the CVE at https://nvd.nist.gov/vuln/detail/CVE-2020-29652, upgraded to an acceptable version.
It should be noted that this PR was originally in place to cover an upgrade to go version 1.17.9; however,
a PR was discovered at https://github.com/stakater/Reloader/pull/311 which can be merged
independently of this one. Once this commit is merged as well as https://github.com/stakater/Reloader/pull/311,
the fix for #315 is complete.

Signed-off-by: Dustin Scott <sdustin@vmware.com>
2022-06-01 11:52:16 -05:00
Maxwell Jiang
4b9844f5c3 the specific configmap/secret name can be selected by regular expression 2022-05-31 01:34:56 -05:00
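The code change itself is not part of this compare view, so purely as an unverified sketch: combining the annotation documented in the README excerpt below with this commit's description, the annotation value would be treated as a regular expression (the regex value here is an assumption):

```yaml
metadata:
  annotations:
    # Assumption: the value may now be a regular expression matching
    # configmap/secret names
    configmap.reloader.stakater.com/reload: "foo-config.*"
```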
stakater-user
1e4016587c [skip-ci] Update artifacts 2022-05-27 10:08:24 +00:00
faizanahmad055
198e5631d4 Merge branch 'master' of github.com:stakater/Reloader into update-golang-version 2022-05-27 11:46:17 +02:00
Faizan Ahmad
f21ad29188 Merge pull request #312 from stakater/remove-spell-checker
Remove spell-checker
2022-05-27 11:45:22 +02:00
faizanahmad055
cd7ec500a7 Remove deadlink
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-27 11:36:07 +02:00
faizanahmad055
030e51351a Remove spell-checker
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-27 11:18:24 +02:00
faizanahmad055
8703c150e0 Remove deadlink
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-27 10:37:43 +02:00
faizanahmad055
1d7192180f Merge branch 'master' of github.com:stakater/Reloader into update-golang-version 2022-05-26 18:24:38 +02:00
stakater-user
9d1733200a [skip-ci] Update artifacts 2022-05-26 15:33:56 +00:00
Faizan Ahmad
841db3c829 Merge pull request #310 from stakater/remove-docker-lint
Remove docker linting from Pull request
2022-05-26 17:11:18 +02:00
stakater-user
1155c981d6 [skip-ci] Update artifacts 2022-05-26 14:18:53 +00:00
faizanahmad055
d29c99ffef Remove docker linting from Pull request
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-26 14:57:51 +02:00
faizanahmad055
3b572568be Update golang version to 1.18.2
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-26 14:50:23 +02:00
Abdul Haseeb
3e5b1d09b8 Merge pull request #308 from stakater/update-golang-cli-version
Update golang-cli package versions
2022-05-26 17:47:59 +05:00
faizanahmad055
61b7a88462 Update golang-cli package versions
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-05-26 14:46:32 +02:00
Faizan Ahmad
d9d986f1f2 Merge pull request #297 from karl-johan-grahn/doc-qa
feat(qa): add qa checks for dockerfile and documentation
2022-04-04 21:06:56 +02:00
stakater-user
42a25bfe64 [skip-ci] Update artifacts 2022-03-25 12:04:59 +00:00
Faizan Ahmad
df667b9c15 Merge pull request #296 from stefan-work/master
feat(kubernetes/chart): explicitly mount service-account-token in dep…
2022-03-25 12:57:47 +01:00
Stefan Klein
f1c71731d9 feat(kubernetes/chart): explicitly mount service-account-token in deployment 2022-03-25 12:09:10 +01:00
Karl-Johan Grahn
e2b14ab7b5 fix(qa): update dockerfile path 2022-03-21 21:32:49 +01:00
Karl-Johan Grahn
4592bd4331 feat(qa): add qa checks for dockerfile and documentation 2022-03-21 21:03:39 +01:00
stakater-user
e380fbaf03 [skip-ci] Update artifacts 2022-03-02 20:49:44 +00:00
Faizan Ahmad
2bce1352a3 Merge pull request #292 from alin-taranu/upgrade-go-version-to-1.17.7
Update go version to fix critical vulnerabilities
2022-03-02 21:42:39 +01:00
Alin Taranu
5b3b617f06 Update go version to fix critical vulnerabilities
The following CVEs should be fixed by upgrading the go version:
* CVE-2022-23806
* CVE-2022-23772
* CVE-2021-44717
* CVE-2022-23773
* CVE-2021-44716
* CVE-2021-41771
* CVE-2021-41772
2022-03-02 13:41:07 +01:00
stakater-user
5040a4236a [skip-ci] Update artifacts 2022-02-27 18:58:50 +00:00
Faizan Ahmad
f6cbc005fc Merge pull request #281 from ruinshe/add-container-sc-support
Add container security context support for Helm chart
2022-02-27 19:50:15 +01:00
stakater-user
91774c941f [skip-ci] Update artifacts 2022-02-27 18:40:03 +00:00
Faizan Ahmad
db0e127563 Merge pull request #282 from AyoyAB/feature/enable-internal-build
Enable internal builds without direct Internet access
2022-02-27 19:31:29 +01:00
John Allberg
3671d33447 Enable setting GOPROXY and GOPRIVATE during build. 2022-02-27 10:16:42 +01:00
John Allberg
e85176b5a7 Make build command more readable. 2022-02-27 10:16:42 +01:00
John Allberg
7941de60ac Enable setting builder and base image during build. 2022-02-27 10:16:38 +01:00
stakater-user
565612e421 [skip-ci] Update artifacts 2022-02-26 22:43:34 +00:00
Faizan Ahmad
31e247e3ae Merge pull request #291 from stakater/fix-reloading-on-create
Make reload on create optional
2022-02-26 23:22:54 +01:00
faizanahmad055
1e79b86f72 Update readme
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-20 00:00:04 +01:00
faizanahmad055
b5b684c67b Fix reload on create flag
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-19 23:23:33 +01:00
faizanahmad055
bbc6bd2dea Update go version
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-14 10:33:55 +01:00
faizanahmad055
61ce150d7c Make reload on create optional
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-13 19:53:49 +01:00
stakater-user
56e83ecde9 [skip-ci] Update artifacts 2022-02-13 12:16:49 +00:00
Faizan Ahmad
c33876508c Merge pull request #290 from ctschubel/master
add replicas config to helm-chart
2022-02-13 13:07:37 +01:00
ctschubel
55ea2e430e add replicas to values.yaml.tmpl 2022-02-10 19:33:29 +01:00
ctschubel
4beefc3f43 fix replica config value name in helm-chart 2022-02-10 16:05:03 +01:00
ctschubel
3b1d30141c add replicas config to helm-chart 2022-02-10 15:21:16 +01:00
Ruins He
fa75df8e96 feat(kubernetes/chart): add container security context configuration into values.yaml.tpl
Signed-off-by: Ruins He <lyhypacm@gmail.com>
2022-01-03 18:22:43 +08:00
Ruins He
21087aaddc feat(kubernetes/chart): add container security context support for Helm chart
Signed-off-by: Ruins He <lyhypacm@gmail.com>
2022-01-03 18:00:15 +08:00
stakater-user
766bc24241 [skip-ci] Update artifacts 2022-01-02 13:43:49 +00:00
Faizan Ahmad
8e3aad3b0e Merge pull request #280 from jsoref/issue-278
Add .Release.Namespace
2022-01-02 14:35:19 +01:00
Josh Soref
ce2866bf6a Add .Release.Namespace
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2022-01-02 01:46:17 -05:00
55 changed files with 1890 additions and 2177 deletions

.github/dependabot.yml (new file)
View File

@@ -0,0 +1,6 @@
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"

View File

@@ -7,9 +7,9 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.20.0
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
KIND_VERSION: "0.10.0"
jobs:
build:
@@ -18,17 +18,17 @@ jobs:
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
steps:
- name: Check out code
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
ref: ${{github.event.pull_request.head.sha}}
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
uses: azure/setup-helm@v3.4
- name: Set up Go
id: go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
@@ -37,9 +37,9 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
uses: golangci/golangci-lint-action@v3
with:
version: v1.33
version: v1.51.1
only-new-issues: false
args: --timeout 10m
@@ -48,6 +48,9 @@ jobs:
cd deployments/kubernetes/chart/reloader
helm lint
- name: Link check
uses: gaurav-nelson/github-action-markdown-link-check@v1
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"

View File

@@ -7,9 +7,9 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.20.0
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
KIND_VERSION: "0.10.0"
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
jobs:
@@ -20,18 +20,18 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
uses: azure/setup-helm@v3.4
- name: Set up Go
id: go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
@@ -40,12 +40,12 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
uses: golangci/golangci-lint-action@v3
with:
version: v1.33
version: v1.51.1
only-new-issues: false
args: --timeout 10m
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -70,7 +70,7 @@ jobs:
- name: Generate Tag
id: generate_tag
uses: anothrNick/github-tag-action@1.26.0
uses: anothrNick/github-tag-action@1.36.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true
@@ -108,7 +108,7 @@ jobs:
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
org.opencontainers.image.revision=${{ github.sha }}
##############################
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
##############################
@@ -116,7 +116,7 @@ jobs:
# Generate tag for operator without "v"
- name: Generate Operator Tag
id: generate_operator_tag
uses: anothrNick/github-tag-action@1.26.0
uses: anothrNick/github-tag-action@1.36.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: false
@@ -132,7 +132,7 @@ jobs:
- name: Helm Template
run: |
helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
# Publish helm chart
- name: Publish Helm chart
@@ -148,16 +148,16 @@ jobs:
linting: on
commit_username: stakater-user
commit_email: stakater@gmail.com
# Commit back changes
- name: Commit files
run: |
git config --local user.email "stakater@gmail.com"
git config --local user.name "stakater-user"
git status
git status
git add .
git commit -m "[skip-ci] Update artifacts" -a
- name: Push changes
uses: ad-m/github-push-action@master
with:
@@ -165,7 +165,7 @@ jobs:
branch: ${{ github.ref }}
- name: Push Latest Tag
uses: anothrNick/github-tag-action@1.26.0
uses: anothrNick/github-tag-action@1.36.0
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
WITH_V: true

View File

@@ -6,7 +6,7 @@ on:
- "v*"
env:
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.20.0
jobs:
build:
@@ -15,12 +15,12 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0 # See: https://goreleaser.com/ci/actions/
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ env.GOLANG_VERSION }}
id: go

View File

@@ -1,8 +1,13 @@
ARG BUILDER_IMAGE
ARG BASE_IMAGE
# Build the manager binary
FROM --platform=${BUILDPLATFORM} golang:1.16 as builder
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.20.0} as builder
ARG TARGETOS
ARG TARGETARCH
ARG GOPROXY
ARG GOPRIVATE
WORKDIR /workspace
@@ -19,11 +24,17 @@ COPY internal/ internal/
COPY pkg/ pkg/
# Build
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go
RUN CGO_ENABLED=0 \
GOOS=${TARGETOS} \
GOARCH=${TARGETARCH} \
GOPROXY=${GOPROXY} \
GOPRIVATE=${GOPRIVATE} \
GO111MODULE=on \
go build -mod=mod -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
FROM ${BASE_IMAGE:-gcr.io/distroless/static:nonroot}
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

View File

@@ -6,7 +6,8 @@ OS ?= linux
ARCH ?= ???
ALL_ARCH ?= arm64 arm amd64
BUILDER ?= reloader-builder-${ARCH}
BUILDER_IMAGE ?=
BASE_IMAGE ?=
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
@@ -20,6 +21,8 @@ BUILD=
GOCMD = go
GOFLAGS ?= $(GOFLAGS:)
LDFLAGS =
GOPROXY ?=
GOPRIVATE ?=
default: build test
@@ -33,7 +36,17 @@ build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
build-image:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${REPOSITORY_ARCH}" --load -f Dockerfile .
docker buildx build \
--platform ${OS}/${ARCH} \
--build-arg GOARCH=$(ARCH) \
--build-arg BUILDER_IMAGE=$(BUILDER_IMAGE) \
--build-arg BASE_IMAGE=${BASE_IMAGE} \
--build-arg GOPROXY=${GOPROXY} \
--build-arg GOPRIVATE=${GOPRIVATE} \
-t "${REPOSITORY_ARCH}" \
--load \
-f Dockerfile \
.
push:
docker push ${REPOSITORY_ARCH}

View File

@@ -6,8 +6,6 @@
[![GitHub tag](https://img.shields.io/github/tag/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)
[![Docker Stars](https://img.shields.io/docker/stars/stakater/reloader.svg?style=flat-square)](https://hub.docker.com/r/stakater/reloader/)
[![MicroBadger Size](https://img.shields.io/microbadger/image-size/stakater/reloader.svg?style=flat-square)](https://microbadger.com/images/stakater/reloader)
[![MicroBadger Layers](https://img.shields.io/microbadger/layers/stakater/reloader.svg?style=flat-square)](https://microbadger.com/images/stakater/reloader)
[![license](https://img.shields.io/github/license/stakater/reloader.svg?style=flat-square)](LICENSE)
[![Get started with Stakater](https://stakater.github.io/README/stakater-github-banner.png)](http://stakater.com/?utm_source=Reloader&utm_medium=github)
@@ -33,7 +31,8 @@ metadata:
annotations:
reloader.stakater.com/auto: "true"
spec:
template: metadata:
template:
metadata:
```
This will automatically discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts where `foo-configmap` or `foo-secret` is being used, either via an environment variable or from a volume mount, and it will perform a rolling upgrade on the related pods when `foo-configmap` or `foo-secret` are updated.
@@ -88,7 +87,8 @@ metadata:
annotations:
configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
template: metadata:
template:
metadata:
```
Use a comma-separated list to define multiple configmaps.
@@ -143,6 +143,7 @@ spec:
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
@@ -182,6 +183,25 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the Reloader pods down to `0`.
Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `--namespace-selector` parameter, for example:
```
--namespace-selector=reloader:enabled,test:true
```
Only namespaces labeled like the following namespace YAML will be watched:
```yaml
kind: Namespace
apiVersion: v1
metadata:
  ...
  labels:
    reloader: enabled
    test: true
  ...
```
If you want to select namespaces only by the label key, use `*` as the value.
For example, with `--namespace-selector=select-this:*`, all namespaces with the label key "select-this" will be selected regardless of the label's value.
### Vanilla kustomize
You can also apply the vanilla manifests by running the following command
@@ -233,6 +253,12 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Helm template compilation.
Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `namespaceSelector` parameter:
| Parameter | Description | Type |
| ---------------- | -------------------------------------------------------------- | ------- |
| namespaceSelector | Comma-separated list of key:value namespace labels | string |
You can also set the log format of Reloader to JSON by setting `logFormat` to `json` in values.yaml and applying the chart
You can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the values.yaml file. The service monitor will be removed in future releases of Reloader in favour of the pod monitor.
@@ -240,9 +266,18 @@ You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonito
**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
| Parameter | Description | Type |
| ---------------- | ---------------------------------------------------------------------------- | ------- |
| ---------------- |------------------------------------------------------------------------------| ------- |
| isOpenshift | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean |
| isArgoRollouts | Enable Argo Rollouts. Valid values are either `true` or `false` | boolean |
| reloadOnCreate | Enable reload on create events. Valid values are either `true` or `false` | boolean |
**reloadOnCreate** controls how Reloader handles configmaps/secrets being added to the cache for the first time. If reloadOnCreate is set to true:
* Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload.
* When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload.
* If you are running Reloader in HA mode, all workloads will have a rolling update performed when a new leader is elected.
If reloadOnCreate is set to false:
* Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs. In the worst case, the window in which there can be no leader is 15s, as this is the LeaseDuration.
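A minimal values sketch for the flags discussed above (field names from the values diff below; enabling HA also requires the leases RBAC the chart adds):

```yaml
reloader:
  reloadOnCreate: true   # rolling update on first appearance of configmaps/secrets
  enableHA: true         # leadership election; allows replicas > 1
  deployment:
    replicas: 2
```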
## Help

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.103
appVersion: v0.0.103
version: v1.0.9
appVersion: v1.0.9
keywords:
- Reloader
- kubernetes
@@ -16,16 +16,6 @@ maintainers:
- name: Stakater
email: hello@stakater.com
- name: rasheedamir
email: rasheed@aurorasolutions.io
- name: waseem-h
email: waseemhassan@stakater.com
email: rasheed@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
- name: kahootali
email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
email: ahmad@aurorasolutions.io
- name: ahsan-storm
email: ahsanmuhammad1@outlook.com
- name: ahmedwaleedmalik
email: waleed@stakater.com

View File

@@ -28,6 +28,23 @@ heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}
{{/*
Create pod anti affinity labels
*/}}
{{- define "reloader-podAntiAffinity" -}}
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ template "reloader-fullname" . }}
topologyKey: "kubernetes.io/hostname"
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
@@ -45,4 +62,4 @@ Create the annotations to support helm3
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
{{- end -}}

View File

@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
@@ -32,6 +32,14 @@ rules:
- list
- get
- watch
{{- if .Values.reloader.namespaceSelector }}
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
@@ -77,4 +85,21 @@ rules:
- get
- update
- patch
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
{{- end}}
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
{{- end }}

View File

@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -25,5 +25,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

View File

@@ -15,8 +15,13 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
{{- if not (.Values.reloader.enableHA) }}
replicas: 1
{{- else }}
replicas: {{ .Values.reloader.deployment.replicas }}
{{- end}}
revisionHistoryLimit: 2
selector:
matchLabels:
@@ -40,13 +45,20 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
{{- end }}
spec:
{{- with .Values.reloader.deployment.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.nodeSelector }}
nodeSelector:
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.affinity }}
{{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
affinity:
{{- if .Values.reloader.deployment.affinity }}
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
{{- end}}
{{ include "reloader-podAntiAffinity" . | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
@@ -59,7 +71,7 @@ spec:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (.Values.reloader.deployment.env.existing) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
@@ -77,6 +89,17 @@ spec:
key: {{ $name | quote }}
{{- end }}
{{- end }}
{{- range $secret, $values := .Values.reloader.deployment.env.existing }}
{{- range $name, $key := $values }}
{{- if not ( empty $name) }}
- name: {{ $name | quote }}
valueFrom:
secretKeyRef:
name: {{ $secret | quote }}
key: {{ $key | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- range $name, $value := .Values.reloader.deployment.env.field }}
{{- if not ( empty $value) }}
- name: {{ $name | quote }}
@@ -91,6 +114,16 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
{{- end }}
ports:
@@ -98,7 +131,7 @@ spec:
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
@@ -113,12 +146,20 @@ spec:
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
{{- $containerSecurityContext := .Values.reloader.deployment.containerSecurityContext | default dict }}
{{- if .Values.reloader.readOnlyRootFileSystem }}
{{- $_ := set $containerSecurityContext "readOnlyRootFilesystem" true }}
{{- end }}
securityContext:
{{- toYaml $containerSecurityContext | nindent 10 }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (ne .Values.reloader.reloadStrategy "default")}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -132,7 +173,9 @@ spec:
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.namespaceSelector }}
- "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
@@ -158,9 +201,15 @@ spec:
{{- if eq .Values.reloader.isArgoRollouts true }}
- "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
{{- end }}
{{- if eq .Values.reloader.reloadOnCreate true }}
- "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
{{- end }}
{{- if ne .Values.reloader.reloadStrategy "default" }}
- "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
{{- end }}
{{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
- "--enable-ha=true"
{{- end}}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
@@ -170,6 +219,9 @@ spec:
securityContext: {{ toYaml .Values.reloader.deployment.securityContext | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "reloader-serviceAccountName" . }}
{{- if hasKey .Values.reloader.deployment "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.reloader.deployment.automountServiceAccountToken }}
{{- end }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumes:
- emptyDir: {}

View File

@@ -0,0 +1,11 @@
{{- if .Values.reloader.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ template "reloader-fullname" . }}
spec:
minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
{{- end }}

View File

@@ -1,26 +1,53 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
{{- if ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
{{- if .Values.reloader.podMonitor.annotations }}
annotations:
{{ tpl (toYaml .Values.reloader.podMonitor.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{ tpl (toYaml .Values.reloader.podMonitor.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
namespace: {{ .Values.reloader.podMonitor.namespace }}
namespace: {{ tpl .Values.reloader.podMonitor.namespace . }}
{{- end }}
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
{{- with .Values.reloader.podMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.scheme }}
scheme: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.bearerTokenSecret }}
bearerTokenSecret: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .| nindent 6 }}
{{- end }}
{{- with .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: {{ .Values.reloader.podMonitor.honorLabels }}
{{- with .Values.reloader.podMonitor.metricRelabelings }}
metricRelabelings:
{{- tpl (toYaml . | nindent 6) $ }}
{{- end }}
{{- with .Values.reloader.podMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.reloader.podMonitor.podTargetLabels }}
podTargetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:

View File

@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
@@ -77,4 +77,21 @@ rules:
- get
- update
- patch
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
{{- end}}
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
{{- end }}

View File

@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -25,5 +25,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,21 @@
{{- if .Values.reloader.deployment.env.secret -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
type: Opaque
data:
{{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}
ALERT_ON_RELOAD: {{ .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_SINK -}}
ALERT_SINK: {{ .Values.reloader.deployment.env.secret.ALERT_SINK | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL -}}
ALERT_WEBHOOK_URL: {{ .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
{{ end }}
{{ end }}

View File

@@ -13,6 +13,7 @@ metadata:
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}

View File

@@ -4,6 +4,9 @@ kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
{{- end }}
{{- if hasKey .Values.reloader.serviceAccount "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.reloader.serviceAccount.automountServiceAccountToken }}
{{- end }}
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
@@ -19,4 +22,5 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

View File

@@ -2,25 +2,52 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
{{- if .Values.reloader.serviceMonitor.annotations }}
annotations:
{{ tpl (toYaml .Values.reloader.serviceMonitor.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceMonitor.labels }}
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
{{ tpl (toYaml .Values.reloader.serviceMonitor.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.serviceMonitor.namespace }}
namespace: {{ .Values.reloader.serviceMonitor.namespace }}
namespace: {{ tpl .Values.reloader.serviceMonitor.namespace . }}
{{- end }}
spec:
endpoints:
- targetPort: http
path: "/metrics"
{{- if .Values.reloader.serviceMonitor.interval }}
interval: {{ .Values.reloader.serviceMonitor.interval }}
{{- end }}
{{- if .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.scheme }}
scheme: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.bearerTokenFile }}
bearerTokenFile: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .| nindent 6 }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: {{ .Values.reloader.serviceMonitor.honorLabels }}
{{- with .Values.reloader.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- tpl (toYaml . | nindent 6) $ }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:

View File

@@ -0,0 +1,50 @@
suite: Deployment
templates:
- deployment.yaml
tests:
- it: sets readOnlyRootFilesystem in container securityContext when reloader.readOnlyRootFileSystem is true
set:
reloader:
readOnlyRootFileSystem: true
deployment:
containerSecurityContext:
readOnlyRootFilesystem: false
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
value: true
- it: sets readOnlyRootFilesystem in container securityContext even if reloader.deployment.containerSecurityContext is null
set:
reloader:
readOnlyRootFileSystem: true
deployment:
containerSecurityContext: null
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
value: true
- it: does not override readOnlyRootFilesystem in container securityContext based on reloader.readOnlyRootFileSystem
set:
reloader:
readOnlyRootFileSystem: false
deployment:
containerSecurityContext:
readOnlyRootFilesystem: true
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
value: true
- it: template is still valid with no defined containerSecurityContext
set:
reloader:
readOnlyRootFileSystem: false
deployment:
containerSecurityContext: null
asserts:
- isEmpty:
path: spec.template.spec.containers[0].securityContext

View File

@@ -13,16 +13,22 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
namespaceSelector: "" # Comma separated list of 'key:value' labels for namespaces selection
logFormat: "" #json
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
@@ -41,6 +47,13 @@ reloader:
runAsNonRoot: true
runAsUser: 65534
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -53,10 +66,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.103
version: v1.0.9
image:
name: stakater/reloader
tag: v0.0.103
tag: v1.0.9
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -64,8 +77,21 @@ reloader:
open:
# secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
secret:
# ALERT_ON_RELOAD: <"true"|"false">
# ALERT_SINK: <"slack"> # By default it will be a raw text based webhook
# ALERT_WEBHOOK_URL: <"webhook_url">
# ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed">
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# existing secret, you can specify multiple existing secrets, for each
# specify the env var name followed by the key in existing secret that
# will be used to populate the env var
existing:
# existing_secret_name:
# ALERT_ON_RELOAD: alert_on_reload_key
# ALERT_SINK: alert_sink_key
# ALERT_WEBHOOK_URL: alert_webhook_key
# ALERT_ADDITIONAL_INFO: alert_additional_info_key
# Liveness and readiness probe timeout values.
livenessProbe: {}
@@ -92,6 +118,8 @@ reloader:
pod:
annotations: {}
priorityClassName: ""
# imagePullSecrets:
# - name: myregistrykey
service: {}
# labels: {}
@@ -123,20 +151,103 @@ reloader:
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
# Fallback to the prometheus default unless specified
# interval: 10s
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
# scheme: ""
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
# tlsConfig: {}
# bearerTokenFile:
# Fallback to the prometheus default unless specified
# timeout: 30s
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
labels: {}
## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
annotations: {}
# Retain the job and instance labels of the metrics pushed to the Pushgateway
# [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
honorLabels: true
## Metric relabel configs to apply to samples before ingestion.
## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
## Relabel configs to apply to samples before ingestion.
## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
targetLabels: []
podMonitor:
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
# Fallback to the prometheus default unless specified
# interval: 10s
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
# scheme: ""
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
# tlsConfig: {}
# bearerTokenSecret:
# Fallback to the prometheus default unless specified
# timeout: 30s
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
labels: {}
## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
annotations: {}
# Retain the job and instance labels of the metrics pushed to the Pushgateway
# [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
honorLabels: true
## Metric relabel configs to apply to samples before ingestion.
## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
## Relabel configs to apply to samples before ingestion.
## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
podTargetLabels: []
podDisruptionBudget:
enabled: false
# Set the minimum available replicas
# minAvailable: 1

View File

@@ -4,7 +4,5 @@ kind: Kustomization
resources:
- manifests/clusterrole.yaml
- manifests/clusterrolebinding.yaml
- manifests/role.yaml
- manifests/rolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
- manifests/deployment.yaml

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -46,3 +46,10 @@ rules:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@@ -9,7 +9,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"

View File

@@ -8,14 +8,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.103
version: v1.0.9
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -27,16 +28,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.103
version: v1.0.9
spec:
containers:
- image: "stakater/reloader:v0.0.103"
- image: "stakater/reloader:v1.0.9"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -45,7 +46,7 @@ spec:
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
@@ -59,6 +60,9 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
{}
securityContext:
runAsNonRoot: true
runAsUser: 65534

View File

@@ -1,3 +0,0 @@
---
# Source: reloader/templates/podmonitor.yaml

View File

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/role.yaml

View File

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/rolebinding.yaml

View File

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/service.yaml

View File

@@ -8,8 +8,9 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default

View File

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/servicemonitor.yaml

View File

@@ -8,11 +8,12 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -24,7 +25,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -61,6 +62,13 @@ rules:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -72,7 +80,7 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
@@ -96,14 +104,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.103
version: v1.0.9
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -115,16 +124,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.103"
chart: "reloader-v1.0.9"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.103
version: v1.0.9
spec:
containers:
- image: "stakater/reloader:v0.0.103"
- image: "stakater/reloader:v1.0.9"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -133,7 +142,7 @@ spec:
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
@@ -147,6 +156,9 @@ spec:
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
{}
securityContext:
runAsNonRoot: true
runAsUser: 65534

View File

@@ -13,6 +13,8 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
@@ -22,6 +24,7 @@ reloader:
rbac: false
matchLabels: {}
deployment:
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
@@ -40,6 +43,13 @@ reloader:
runAsNonRoot: true
runAsUser: 65534
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:

12
docs/Alerting.md Normal file
View File

@@ -0,0 +1,12 @@
# Alerting on Reload
Reloader can alert when it triggers a rolling upgrade on Deployments or StatefulSets. A webhook notification alert is sent to the configured webhook server with all the required information.
#### Enabling the feature
In order to enable this feature, update the reloader.env.secret section of values.yaml with the information needed for the alert.
<pre> ALERT_ON_RELOAD: [ true/false ] Default: false
ALERT_SINK: [ slack/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to the alert
</pre>
#### Slack incoming-webhook creation docs
https://api.slack.com/messaging/webhooks
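For example, a minimal values.yaml fragment enabling Slack alerts might look like the sketch below (the webhook URL is a placeholder, and the reloader.deployment.env.secret path follows the values file shown earlier in this diff):
<pre>
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"
        ALERT_WEBHOOK_URL: "https://hooks.slack.com/services/T000/B000/XXXX"
        ALERT_ADDITIONAL_INFO: "cluster: staging"
</pre>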

118
go.mod
View File

@@ -1,49 +1,91 @@
module github.com/stakater/Reloader
go 1.16
go 1.20
require (
github.com/argoproj/argo-rollouts v1.0.2
github.com/onsi/ginkgo v1.15.1 // indirect
github.com/onsi/gomega v1.11.0 // indirect
github.com/argoproj/argo-rollouts v1.4.0
github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
github.com/prometheus/client_golang v1.10.0
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.3
k8s.io/api v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/client-go v0.21.2
github.com/parnurzeal/gorequest v0.2.16
github.com/prometheus/client_golang v1.14.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.26.1
k8s.io/kubectl v0.26.1
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect
k8s.io/utils v0.0.0-20230202215443-34013725500c // indirect
moul.io/http2curl v1.0.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
// Replacements for argo-rollouts
replace (
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
k8s.io/api => k8s.io/api v0.20.4
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
k8s.io/apiserver => k8s.io/apiserver v0.20.4
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
k8s.io/client-go => k8s.io/client-go v0.20.4
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
k8s.io/component-base => k8s.io/component-base v0.20.4
k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
k8s.io/kubectl => k8s.io/kubectl v0.20.4
k8s.io/kubelet => k8s.io/kubelet v0.20.4
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
k8s.io/metrics => k8s.io/metrics v0.20.4
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.4
k8s.io/sample-controller => k8s.io/sample-controller v0.20.4
)
k8s.io/api v0.0.0 => k8s.io/api v0.26.1
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.26.1
k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2
k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2
k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.26.1
k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2
k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2
k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2
)

1948
go.sum

File diff suppressed because it is too large

View File

@@ -0,0 +1,94 @@
package alert
import (
"fmt"
"os"
"strings"
"github.com/parnurzeal/gorequest"
"github.com/sirupsen/logrus"
)
// function to send alert msg to webhook service
func SendWebhookAlert(msg string) {
webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
if !ok {
logrus.Error("ALERT_WEBHOOK_URL env variable not provided")
return
}
webhook_url = strings.TrimSpace(webhook_url)
alert_sink := os.Getenv("ALERT_SINK")
alert_sink = strings.ToLower(strings.TrimSpace(alert_sink))
// Provision to add a proxy to reach the webhook server, if required
webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY")
webhook_proxy = strings.TrimSpace(webhook_proxy)
// Provision to add additional information to the alert, e.g. ClusterName
alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO")
if ok {
alert_additional_info = strings.TrimSpace(alert_additional_info)
msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
}
if alert_sink == "slack" {
sendSlackAlert(webhook_url, webhook_proxy, msg)
} else {
msg = strings.Replace(msg, "*", "", -1)
sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
}
}
// function to handle server redirection
func redirectPolicy(req gorequest.Request, via []gorequest.Request) error {
return fmt.Errorf("incorrect token (redirection)")
}
// function to send alert to slack
func sendSlackAlert(webhookUrl string, proxy string, msg string) []error {
attachment := Attachment{
Text: msg,
Color: "good",
AuthorName: "Reloader",
}
payload := WebhookMessage{
Attachments: []Attachment{attachment},
}
request := gorequest.New().Proxy(proxy)
resp, _, err := request.
Post(webhookUrl).
RedirectPolicy(redirectPolicy).
Send(payload).
End()
if err != nil {
return err
}
if resp.StatusCode >= 400 {
return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
}
return nil
}
// function to send alert to webhook service as text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
request := gorequest.New().Proxy(proxy)
resp, _, err := request.
Post(webhookUrl).
Type("text").
RedirectPolicy(redirectPolicy).
Send(msg).
End()
if err != nil {
return err
}
if resp.StatusCode >= 400 {
return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
}
return nil
}
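A minimal sketch of exercising this package from within the Reloader module (the URL and message are hypothetical; in the chart these variables come from the reloader secret):

```go
package main

import (
	"os"

	alert "github.com/stakater/Reloader/internal/pkg/alerts"
)

func main() {
	// Hypothetical values; normally injected as env vars from the reloader secret.
	os.Setenv("ALERT_WEBHOOK_URL", "https://example.com/webhook")
	os.Setenv("ALERT_SINK", "webhook") // raw text sink; use "slack" for Slack formatting
	os.Setenv("ALERT_ADDITIONAL_INFO", "cluster: staging")

	// Sends "cluster: staging : <msg>" to the webhook; asterisks are
	// stripped before sending to the raw text sink.
	alert.SendWebhookAlert("Reloader detected changes in *foo-config*")
}
```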

View File

@@ -0,0 +1,61 @@
package alert
type WebhookMessage struct {
Username string `json:"username,omitempty"`
IconEmoji string `json:"icon_emoji,omitempty"`
IconURL string `json:"icon_url,omitempty"`
Channel string `json:"channel,omitempty"`
ThreadTimestamp string `json:"thread_ts,omitempty"`
Text string `json:"text,omitempty"`
Attachments []Attachment `json:"attachments,omitempty"`
Parse string `json:"parse,omitempty"`
ResponseType string `json:"response_type,omitempty"`
ReplaceOriginal bool `json:"replace_original,omitempty"`
DeleteOriginal bool `json:"delete_original,omitempty"`
ReplyBroadcast bool `json:"reply_broadcast,omitempty"`
}
type Attachment struct {
Color string `json:"color,omitempty"`
Fallback string `json:"fallback,omitempty"`
CallbackID string `json:"callback_id,omitempty"`
ID int `json:"id,omitempty"`
AuthorID string `json:"author_id,omitempty"`
AuthorName string `json:"author_name,omitempty"`
AuthorSubname string `json:"author_subname,omitempty"`
AuthorLink string `json:"author_link,omitempty"`
AuthorIcon string `json:"author_icon,omitempty"`
Title string `json:"title,omitempty"`
TitleLink string `json:"title_link,omitempty"`
Pretext string `json:"pretext,omitempty"`
Text string `json:"text,omitempty"`
ImageURL string `json:"image_url,omitempty"`
ThumbURL string `json:"thumb_url,omitempty"`
ServiceName string `json:"service_name,omitempty"`
ServiceIcon string `json:"service_icon,omitempty"`
FromURL string `json:"from_url,omitempty"`
OriginalURL string `json:"original_url,omitempty"`
MarkdownIn []string `json:"mrkdwn_in,omitempty"`
Footer string `json:"footer,omitempty"`
FooterIcon string `json:"footer_icon,omitempty"`
}
type Field struct {
Title string `json:"title"`
Value string `json:"value"`
Short bool `json:"short"`
}
type Action struct {
Type string `json:"type"`
Text string `json:"text"`
Url string `json:"url"`
Style string `json:"style"`
}

View File

@@ -2,39 +2,40 @@ package callbacks
import (
"context"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
openshiftv1 "github.com/openshift/api/apps/v1"
)
//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []interface{}
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object
//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container
// ContainersFunc is a generic func to return containers
type ContainersFunc func(runtime.Object) []v1.Container
//InitContainersFunc is a generic func to return containers
type InitContainersFunc func(interface{}) []v1.Container
// InitContainersFunc is a generic func to return containers
type InitContainersFunc func(runtime.Object) []v1.Container
//VolumesFunc is a generic func to return volumes
type VolumesFunc func(interface{}) []v1.Volume
// VolumesFunc is a generic func to return volumes
type VolumesFunc func(runtime.Object) []v1.Volume
//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error
//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string
//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
// PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(runtime.Object) map[string]string
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
ItemsFunc ItemsFunc
AnnotationsFunc AnnotationsFunc
@@ -47,250 +48,260 @@ type RollingUpgradeFuncs struct {
}
// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object {
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deployments %v", err)
}
items := make([]runtime.Object, len(deployments.Items))
// Ensure we always have pod annotations to add to
for i, v := range deployments.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
annotations := make(map[string]string)
deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
}
items[i] = &deployments.Items[i]
}
return util.InterfaceSlice(deployments.Items)
return items
}
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list daemonSets %v", err)
}
items := make([]runtime.Object, len(daemonSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range daemonSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
items[i] = &daemonSets.Items[i]
}
return util.InterfaceSlice(daemonSets.Items)
return items
}
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list statefulSets %v", err)
}
items := make([]runtime.Object, len(statefulSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range statefulSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
items[i] = &statefulSets.Items[i]
}
return util.InterfaceSlice(statefulSets.Items)
return items
}
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
}
items := make([]runtime.Object, len(deploymentConfigs.Items))
// Ensure we always have pod annotations to add to
for i, v := range deploymentConfigs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
items[i] = &deploymentConfigs.Items[i]
}
return util.InterfaceSlice(deploymentConfigs.Items)
return items
}
// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list Rollouts %v", err)
}
items := make([]runtime.Object, len(rollouts.Items))
// Ensure we always have pod annotations to add to
for i, v := range rollouts.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
items[i] = &rollouts.Items[i]
}
return util.InterfaceSlice(rollouts.Items)
return items
}
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).ObjectMeta.Annotations
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.Deployment).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).ObjectMeta.Annotations
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).ObjectMeta.Annotations
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}
// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}
// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
func GetRolloutAnnotations(item runtime.Object) map[string]string {
return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
return item.(appsv1.Deployment).Spec.Template.Spec.Containers
func GetDeploymentContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.Deployment).Spec.Template.Spec.Containers
}
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
}
// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
func GetStatefulSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}
// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
func GetRolloutContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}
// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
func GetDeploymentInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}
// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}
// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
deployment := resource.(appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
deployment := resource.(*appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
daemonSet := resource.(appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
daemonSet := resource.(*appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
statefulSet := resource.(*appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
rollout := resource.(argorolloutv1alpha1.Rollout)
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
rollout := resource.(*argorolloutv1alpha1.Rollout)
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item interface{}) []v1.Volume {
return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
}
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
}
// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}
// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}

View File

@@ -1,12 +1,16 @@
package cmd
import (
"context"
"errors"
"fmt"
"github.com/stakater/Reloader/internal/pkg/constants"
"net/http"
"os"
"strings"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/leadership"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/controller"
@@ -35,23 +39,38 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:vaule namespace labels to include")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
return cmd
}
func validateFlags(*cobra.Command, []string) error {
// Ensure the reload strategy is one of the following...
var validReloadStrategy bool
valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
for _, s := range valid {
if s == options.ReloadStrategy {
return nil
validReloadStrategy = true
}
}
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
return errors.New(err)
if !validReloadStrategy {
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
return errors.New(err)
}
// Validate that HA options are correct
if options.EnableHA {
if err := validateHAEnvs(); err != nil {
return err
}
}
return nil
}
func configureLogging(logFormat string) error {
@@ -67,6 +86,25 @@ func configureLogging(logFormat string) error {
return nil
}
func validateHAEnvs() error {
podName, podNamespace := getHAEnvs()
if podName == "" {
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
}
if podNamespace == "" {
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
}
return nil
}
func getHAEnvs() (string, string) {
podName := os.Getenv(constants.PodNameEnv)
podNamespace := os.Getenv(constants.PodNamespaceEnv)
return podName, podNamespace
}
func startReloader(cmd *cobra.Command, args []string) {
err := configureLogging(options.LogFormat)
if err != nil {
@@ -96,18 +134,34 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
if err != nil {
logrus.Fatal(err)
}
if len(namespaceLabelSelector) > 0 {
logrus.Warnf("namespace-selector is set, will detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
}
collectors := metrics.SetupPrometheusEndpoint()
var controllers []*controller.Controller
for k := range kube.ResourceMap {
if ignoredResourcesList.Contains(k) {
continue
}
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
controllers = append(controllers, c)
// If HA is enabled we only run the controller once elected as leader
if options.EnableHA {
continue
}
// Now let's start the controller
stop := make(chan struct{})
defer close(stop)
@@ -115,14 +169,38 @@ func startReloader(cmd *cobra.Command, args []string) {
go c.Run(1, stop)
}
// Wait forever
select {}
// Run leadership election
if options.EnableHA {
podName, podNamespace := getHAEnvs()
lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
}
leadership.SetupLivenessEndpoint()
logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}
func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}
func getNamespaceLabelSelector(cmd *cobra.Command) (util.Map, error) {
slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
if err != nil {
logrus.Fatal(err)
}
var namespaceSelectorMap util.Map = make(util.Map)
for _, kv := range slice {
split := strings.Split(kv, ":")
namespaceSelectorMap[split[0]] = split[1]
}
return namespaceSelectorMap, nil
}
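As parsed above, each entry passed to --namespace-selector must contain a colon, since the code indexes split[1]; a value of * matches any value for that key (see the controller change further down). A hypothetical invocation:

```
--namespace-selector=environment:production,team:*
```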
func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
slice, err := cmd.Flags().GetStringSlice(flag)
if err != nil {

View File

@@ -1,6 +1,9 @@
package constants
const (
// DefaultHttpListenAddr is the default listening address for global http server
DefaultHttpListenAddr = ":9090"
// ConfigmapEnvVarPostfix is a postfix for configmap envVar
ConfigmapEnvVarPostfix = "CONFIGMAP"
// SecretEnvVarPostfix is a postfix for secret envVar
@@ -20,3 +23,10 @@ const (
// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
AnnotationsReloadStrategy = "annotations"
)
// Leadership election related consts
const (
LockName string = "stakater-reloader-lock"
PodNameEnv string = "POD_NAME"
PodNamespaceEnv string = "POD_NAMESPACE"
)
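For these leadership constants to work, the pod needs its own name and namespace at runtime; a common way to provide them (a sketch, not taken from this chart) is the Kubernetes downward API:

```yaml
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
```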

View File

@@ -1,22 +1,27 @@
package controller
import (
"context"
"fmt"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/handler"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
v1 "k8s.io/api/core/v1"
"k8s.io/kubectl/pkg/scheme"
)
// Controller for checking events
@@ -28,6 +33,8 @@ type Controller struct {
namespace string
ignoredNamespaces util.List
collectors metrics.Collectors
recorder record.EventRecorder
namespaceSelector map[string]string
}
// controllerInitialized flag determines whether the controller has been initialized
@@ -35,13 +42,19 @@ var controllerInitialized bool = false
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector map[string]string, collectors metrics.Collectors) (*Controller, error) {
c := Controller{
client: client,
namespace: namespace,
ignoredNamespaces: ignoredNamespaces,
namespaceSelector: namespaceLabelSelector,
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
Interface: client.CoreV1().Events(""),
})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())
@@ -55,16 +68,22 @@ func NewController(
c.informer = informer
c.queue = queue
c.collectors = collectors
c.recorder = recorder
logrus.Infof("created controller for: %s", resource)
return &c, nil
}
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
})
if options.ReloadOnCreate == "true" {
if !c.resourceInIgnoredNamespace(obj) && c.resourceInNamespaceSelector(obj) && controllerInitialized {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
Recorder: c.recorder,
})
}
}
}
@@ -78,13 +97,50 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
return false
}
func (c *Controller) resourceInNamespaceSelector(raw interface{}) bool {
if len(c.namespaceSelector) == 0 {
return true
}
switch object := raw.(type) {
case *v1.ConfigMap:
return c.matchLabels(object.ObjectMeta.Namespace)
case *v1.Secret:
return c.matchLabels(object.ObjectMeta.Namespace)
}
return true
}
func (c *Controller) matchLabels(resourceNamespace string) bool {
namespace, err := c.client.CoreV1().Namespaces().Get(context.Background(), resourceNamespace, metav1.GetOptions{})
if err != nil {
logrus.Warn(err)
return false
}
for selectorKey, selectorVal := range c.namespaceSelector {
namespaceLabelVal, namespaceLabelKeyExists := namespace.ObjectMeta.Labels[selectorKey]
if namespaceLabelKeyExists && selectorVal == "*" {
continue
}
if !namespaceLabelKeyExists || selectorVal != namespaceLabelVal {
return false
}
}
return true
}
// Update function to add an old object and a new object to the queue in case of updating a resource
func (c *Controller) Update(old interface{}, new interface{}) {
if !c.resourceInIgnoredNamespace(new) {
if !c.resourceInIgnoredNamespace(new) && c.resourceInNamespaceSelector(new) {
c.queue.Add(handler.ResourceUpdatedHandler{
Resource: new,
OldResource: old,
Collectors: c.collectors,
Recorder: c.recorder,
})
}
}
@@ -94,7 +150,7 @@ func (c *Controller) Delete(old interface{}) {
// Todo: Any future delete event can be handled here
}
//Run function for controller which handles the queue
// Run function for controller which handles the queue
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
defer runtime.HandleCrash()
@@ -166,5 +222,6 @@ func (c *Controller) handleErr(err error, key interface{}) {
c.queue.Forget(key)
// Report to an external entity that, even after several retries, we could not successfully process this key
runtime.HandleError(err)
logrus.Infof("Dropping the key %q out of the queue: %v", key, err)
logrus.Errorf("Dropping key out of the queue: %v", err)
logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
}
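A standalone illustration of the selector semantics implemented by matchLabels above, re-implemented here for clarity (not the controller's actual code):

```go
package main

import "fmt"

// matches reports whether the namespace labels satisfy the selector,
// where a selector value of "*" matches any value for a present key,
// mirroring Controller.matchLabels.
func matches(selector, labels map[string]string) bool {
	for key, want := range selector {
		got, ok := labels[key]
		if ok && want == "*" {
			continue
		}
		if !ok || want != got {
			return false
		}
	}
	return true
}

func main() {
	sel := map[string]string{"env": "*", "team": "platform"}
	fmt.Println(matches(sel, map[string]string{"env": "prod", "team": "platform"})) // true
	fmt.Println(matches(sel, map[string]string{"team": "platform"}))                // false: "env" key missing
}
```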

View File

@@ -1,11 +1,13 @@
package controller
import (
"github.com/stakater/Reloader/internal/pkg/constants"
"context"
"os"
"testing"
"time"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/sirupsen/logrus"
@@ -14,7 +16,10 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
@@ -40,7 +45,7 @@ func TestMain(m *testing.M) {
logrus.Infof("Creating controller")
for k := range kube.ResourceMap {
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, map[string]string{}, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -2279,3 +2284,143 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
})
}
}
func TestController_resourceInNamespaceSelector(t *testing.T) {
type fields struct {
indexer cache.Indexer
queue workqueue.RateLimitingInterface
informer cache.Controller
namespace v1.Namespace
namespaceSelector util.Map
}
type args struct {
raw interface{}
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "TestConfigMapResourceInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "this",
"select2": "this2",
},
},
},
},
args: args{
raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"),
},
want: true,
}, {
name: "TestConfigMapResourceNotInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "not-selected-namespace",
Labels: map[string]string{},
},
},
},
args: args{
raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"),
},
want: false,
},
{
name: "TestSecretResourceInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "this",
"select2": "this2",
},
},
},
},
args: args{
raw: testutil.GetSecret("selected-namespace", "testsecret", "test"),
},
want: true,
}, {
name: "TestSecretResourceNotInNamespaceSelector",
fields: fields{
namespaceSelector: util.Map{
"select": "this",
"select2": "this2",
},
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "not-selected-namespace",
Labels: map[string]string{},
},
},
},
args: args{
raw: testutil.GetSecret("not-selected-namespace", "secret", "test"),
},
want: false,
}, {
name: "TestSecretResourceInNamespaceSelectorWiledcardValue",
fields: fields{
namespaceSelector: util.Map{
"select": "*",
},
namespace: v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "selected-namespace",
Labels: map[string]string{
"select": "this",
},
},
},
},
args: args{
raw: testutil.GetSecret("selected-namespace", "secret", "test"),
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeClient := fake.NewSimpleClientset()
namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
logrus.Infof("created fakeClient namesapce for testing = %s", namespace.Name)
c := &Controller{
client: fakeClient,
indexer: tt.fields.indexer,
queue: tt.fields.queue,
informer: tt.fields.informer,
namespace: tt.fields.namespace.ObjectMeta.Name,
namespaceSelector: tt.fields.namespaceSelector,
}
if got := c.resourceInNamespaceSelector(tt.args.raw); got != tt.want {
t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -5,12 +5,14 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
Resource interface{}
Collectors metrics.Collectors
Recorder record.EventRecorder
}
// Handle processes the newly created resource
@@ -20,7 +22,7 @@ func (r ResourceCreatedHandler) Handle() error {
} else {
config, _ := r.GetConfig()
// process resource based on its type
return doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors, r.Recorder)
}
return nil
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
// ResourceUpdatedHandler contains updated objects
@@ -12,6 +13,7 @@ type ResourceUpdatedHandler struct {
Resource interface{}
OldResource interface{}
Collectors metrics.Collectors
Recorder record.EventRecorder
}
// Handle processes the updated resource
@@ -22,7 +24,7 @@ func (r ResourceUpdatedHandler) Handle() error {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
// process resource based on its type
return doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors, r.Recorder)
}
}
return nil

View File

@@ -4,8 +4,14 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
alert "github.com/stakater/Reloader/internal/pkg/alerts"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
@@ -13,8 +19,9 @@ import (
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
)
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
@@ -87,31 +94,31 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder) error {
clients := kube.GetClients()
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder)
if err != nil {
return err
}
if kube.IsOpenshift {
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder)
if err != nil {
return err
}
@@ -120,9 +127,9 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
return nil
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors, recorder)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
@@ -130,7 +137,7 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
@@ -154,8 +161,9 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
value = strings.TrimSpace(value)
re := regexp.MustCompile("^" + value + "$")
if re.Match([]byte(config.ResourceName)) {
result = invokeReloadStrategy(upgradeFuncs, i, config, false)
if result == constants.Updated {
break
@@ -172,16 +180,35 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
resourceName := util.ToObjectMeta(i).Name
accessor, err := meta.Accessor(i)
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
return err
}
resourceName := accessor.GetName()
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
if err != nil {
message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
logrus.Errorf(message)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
if recorder != nil {
recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
}
return err
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
logrus.Infof(message)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
if recorder != nil {
recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
}
if ok && alert_on_reload == "true" {
msg := fmt.Sprintf(
"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
alert.SendWebhookAlert(msg)
}
}
}
}
@@ -259,7 +286,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
return nil
}
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
volumes := upgradeFuncs.VolumesFunc(item)
containers := upgradeFuncs.ContainersFunc(item)
initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -298,7 +325,7 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
return container
}
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
@@ -306,7 +333,7 @@ func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return constants.NoContainerFound
@@ -358,7 +385,7 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
return annotations, nil
}
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

View File
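
One behavioural change in the file above deserves a callout: values listed in a reload annotation are now compiled as anchored regular expressions ("^value$") rather than compared for string equality, so a plain resource name still matches only itself while patterns become possible. A small self-contained sketch of that matching, using a hypothetical annotation value:

// Sketch of the anchored-regex annotation matching introduced above.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	annotationValue := "app-config,secret-.*" // hypothetical annotation value
	resourceName := "secret-credentials"
	for _, value := range strings.Split(annotationValue, ",") {
		value = strings.TrimSpace(value)
		// Anchoring keeps "app-config" matching only itself.
		re := regexp.MustCompile("^" + value + "$")
		if re.MatchString(resourceName) {
			fmt.Printf("%q matches %q\n", value, resourceName)
		}
	}
}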

@@ -16,7 +16,9 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
testclient "k8s.io/client-go/kubernetes/fake"
)
@@ -1186,7 +1188,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -1211,7 +1213,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingArs(t *te
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
@@ -1236,7 +1238,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingArs(t *
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -1261,7 +1263,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -1301,7 +1303,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -1325,7 +1327,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingArs(t *test
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -1350,7 +1352,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerU
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
@@ -1375,7 +1377,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingArs(t *testing.T)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -1400,7 +1402,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingArs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -1425,7 +1427,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingArs(t *testing
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -1450,7 +1452,7 @@ func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -1475,7 +1477,7 @@ func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testi
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
@@ -1500,7 +1502,7 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -1525,7 +1527,7 @@ func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
@@ -1550,7 +1552,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -1575,7 +1577,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingArs(t *testing.T)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -1600,7 +1602,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingArs(t
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -1625,7 +1627,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
@@ -1650,7 +1652,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *tes
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
@@ -1675,7 +1677,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingArs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
@@ -1700,7 +1702,7 @@ func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
@@ -1725,7 +1727,7 @@ func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testin
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
@@ -1750,7 +1752,7 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
@@ -1775,7 +1777,7 @@ func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *t
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
@@ -1800,7 +1802,7 @@ func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
@@ -1825,7 +1827,7 @@ func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *test
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
@@ -1850,7 +1852,7 @@ func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with pod annotations")
@@ -1860,7 +1862,11 @@ func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) {
items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
var foundPod, foundBoth bool
for _, i := range items {
name := util.ToObjectMeta(i).Name
accessor, err := meta.Accessor(i)
if err != nil {
t.Errorf("Error getting accessor for item: %v", err)
}
name := accessor.GetName()
if name == arsConfigmapWithPodAnnotations {
annotations := deploymentFuncs.PodAnnotationsFunc(i)
updated := testutil.GetResourceSHAFromAnnotation(annotations)
@@ -1896,12 +1902,12 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "fail.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ interface{}) error {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
collectors := getCollectors()
_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 {
t.Errorf("Counter was not increased")
@@ -1916,7 +1922,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -1941,7 +1947,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *te
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
@@ -1966,7 +1972,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingErs(t *
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -1991,7 +1997,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -2031,7 +2037,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
@@ -2055,7 +2061,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingErs(t *test
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -2080,7 +2086,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerU
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
@@ -2105,7 +2111,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingErs(t *testing.T)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -2130,7 +2136,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingErs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -2155,7 +2161,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingErs(t *testing
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -2180,7 +2186,7 @@ func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -2205,7 +2211,7 @@ func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testi
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
@@ -2230,7 +2236,7 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -2255,7 +2261,7 @@ func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUs
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
@@ -2280,7 +2286,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingErs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -2305,7 +2311,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingErs(t *testing.T)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -2330,7 +2336,7 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingErs(t
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -2355,7 +2361,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
@@ -2380,7 +2386,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *tes
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
@@ -2405,7 +2411,7 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingErs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
@@ -2430,7 +2436,7 @@ func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
@@ -2455,7 +2461,7 @@ func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testin
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
@@ -2480,7 +2486,7 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
@@ -2505,7 +2511,7 @@ func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *t
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
@@ -2530,7 +2536,7 @@ func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
@@ -2555,7 +2561,7 @@ func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *test
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
@@ -2580,7 +2586,7 @@ func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with pod annotations")
@@ -2591,7 +2597,11 @@ func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) {
items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
var foundPod, foundBoth bool
for _, i := range items {
name := util.ToObjectMeta(i).Name
accessor, err := meta.Accessor(i)
if err != nil {
t.Errorf("Error getting accessor for item: %v", err)
}
name := accessor.GetName()
if name == ersConfigmapWithPodAnnotations {
containers := deploymentFuncs.ContainersFunc(i)
updated := testutil.GetResourceSHAFromEnvVar(containers, envName)
@@ -2627,12 +2637,12 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "fail.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ interface{}) error {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
collectors := getCollectors()
_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors, nil)
if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 {
t.Errorf("Counter was not increased")

View File
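
The test changes above track two signature changes in the handler: UpdateFunc now takes runtime.Object instead of interface{}, and object metadata is read through meta.Accessor rather than util.ToObjectMeta. A minimal sketch of that accessor pattern, with an illustrative Deployment:

// Sketch: generic metadata access for any runtime.Object.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func describe(obj runtime.Object) {
	// meta.Accessor works for Deployments, DaemonSets, StatefulSets, and so on.
	accessor, err := meta.Accessor(obj)
	if err != nil {
		fmt.Println("object carries no ObjectMeta:", err)
		return
	}
	fmt.Println(accessor.GetName(), accessor.GetAnnotations())
}

func main() {
	describe(&appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "example"}})
}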

@@ -0,0 +1,107 @@
package leadership
import (
"context"
"net/http"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/controller"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)
var (
// Used for liveness probe
m sync.Mutex
healthy bool = true
)
func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
return &resourcelock.LeaseLock{
LeaseMeta: v1.ObjectMeta{
Name: lockName,
Namespace: namespace,
},
Client: client,
LockConfig: resourcelock.ResourceLockConfig{
Identity: podname,
},
}
}
// RunLeaderElection runs leader election. If an instance of the controller is the leader and stops leading, it will shut down.
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
// Construct channels for the controllers to use
var stopChannels []chan struct{}
for i := 0; i < len(controllers); i++ {
stop := make(chan struct{})
stopChannels = append(stopChannels, stop)
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
ReleaseOnCancel: true,
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(c context.Context) {
logrus.Info("became leader, starting controllers")
runControllers(controllers, stopChannels)
},
OnStoppedLeading: func() {
logrus.Info("no longer leader, shutting down")
stopControllers(stopChannels)
cancel()
m.Lock()
defer m.Unlock()
healthy = false
},
OnNewLeader: func(current_id string) {
if current_id == id {
logrus.Info("still the leader!")
return
}
logrus.Infof("new leader is %s", current_id)
},
},
})
}
func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
for i, c := range controllers {
c := c
go c.Run(1, stopChannels[i])
}
}
func stopControllers(stopChannels []chan struct{}) {
for _, c := range stopChannels {
close(c)
}
}
// SetupLivenessEndpoint sets up the liveness probe endpoint. If leadership
// election is enabled and a replica stops leading, the liveness probe fails
// and the kubelet restarts the container.
func SetupLivenessEndpoint() {
http.HandleFunc("/live", healthz)
}
func healthz(w http.ResponseWriter, req *http.Request) {
m.Lock()
defer m.Unlock()
if healthy {
if i, err := w.Write([]byte("alive")); err != nil {
logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
}
return
}
w.WriteHeader(http.StatusInternalServerError)
}

View File
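
Putting the new leadership package together, a replica would wire these helpers up roughly as follows. This is a sketch: clientset, controllers, podName, and the lock name are assumptions, not lines from this changeset.

// Sketch: wiring leader election from main (fragment, not a full program).
podName := os.Getenv("POD_NAME") // assumed to be injected via the downward API
ctx, cancel := context.WithCancel(context.Background())
lock := GetNewLock(clientset.CoordinationV1(), "reloader-lock", podName, "default")
SetupLivenessEndpoint() // registers /live on the default mux
go func() { logrus.Fatal(http.ListenAndServe(":9090", nil)) }()
RunLeaderElection(lock, ctx, cancel, podName, controllers) // controllers start once leadership is won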

@@ -0,0 +1,213 @@
package leadership
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/handler"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
)
func TestMain(m *testing.M) {
testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
logrus.Infof("Running Testcases")
retCode := m.Run()
testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
os.Exit(retCode)
}
func TestHealthz(t *testing.T) {
request, err := http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf("failed to create request")
}
response := httptest.NewRecorder()
healthz(response, request)
got := response.Code
want := 200
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
// Have the liveness probe serve a 500
healthy = false
request, err = http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf("failed to create request")
}
response = httptest.NewRecorder()
healthz(response, request)
got = response.Code
want = 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
}
// TestRunLeaderElection validates that the liveness endpoint serves 500 when
// leadership election fails
func TestRunLeaderElection(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})
// healthy was left false by TestHealthz above, so the probe already serves 500
request, err := http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf("failed to create request")
}
response := httptest.NewRecorder()
healthz(response, request)
got := response.Code
want := 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
// Cancel the leader election context, so leadership is released and
// live endpoint serves 500
cancel()
request, err = http.NewRequest(http.MethodGet, "/live", nil)
if err != nil {
t.Fatalf("failed to create request")
}
response = httptest.NewRecorder()
healthz(response, request)
got = response.Code
want = 500
if got != want {
t.Fatalf("got: %q, want: %q", got, want)
}
}
// TestRunLeaderElectionWithControllers tests that leadership election works
// with real controllers and that the controllers stop running when the
// context is cancelled.
func TestRunLeaderElectionWithControllers(t *testing.T) {
t.Logf("Creating controller")
var controllers []*controller.Controller
for k := range kube.ResourceMap {
c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, map[string]string{}, metrics.NewCollectors())
if err != nil {
logrus.Fatalf("%s", err)
}
controllers = append(controllers, c)
}
time.Sleep(3 * time.Second)
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)
ctx, cancel := context.WithCancel(context.TODO())
// Start running leadership election, this also starts the controllers
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
time.Sleep(3 * time.Second)
// Create a configmap and a deployment, then update the configmap to trigger a reload
configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
if err != nil {
t.Fatalf("Error while creating the configmap %v", err)
}
// Creating deployment
_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
if err != nil {
t.Fatalf("Error in deployment creation: %v", err)
}
// Updating configmap for first time
updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
if updateErr != nil {
t.Fatalf("Configmap was not updated")
}
time.Sleep(3 * time.Second)
// Verifying deployment update
logrus.Infof("Verifying pod envvars has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
config := util.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Fatalf("Deployment was not updated")
}
time.Sleep(testutil.SleepDuration)
// Cancel the leader election context, so leadership is released
logrus.Info("shutting down controller from test")
cancel()
time.Sleep(5 * time.Second)
// Updating configmap again
updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
if updateErr != nil {
t.Fatalf("Configmap was not updated")
}
// Verifying that the deployment was not updated as leadership has been lost
logrus.Infof("Verifying pod envvars has not been updated")
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
config = util.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if updated {
t.Fatalf("Deployment was updated")
}
// Deleting deployment
err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the deployment %v", err)
}
// Deleting configmap
err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(testutil.SleepDuration)
}

View File

@@ -3,7 +3,6 @@ package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"net/http"
)
@@ -33,11 +32,7 @@ func NewCollectors() Collectors {
func SetupPrometheusEndpoint() Collectors {
collectors := NewCollectors()
prometheus.MustRegister(collectors.Reloaded)
go func() {
http.Handle("/metrics", promhttp.Handler())
logrus.Fatal(http.ListenAndServe(":9090", nil))
}()
http.Handle("/metrics", promhttp.Handler())
return collectors
}

View File
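
SetupPrometheusEndpoint now only registers the /metrics handler on the default mux instead of starting its own listener, which pairs with the 9090-only change: a single server can serve /metrics and /live together. Roughly, under the assumption that main starts that server:

// Sketch: one listener serving both endpoints (fragment).
collectors := metrics.SetupPrometheusEndpoint() // registers /metrics; collectors go to the controllers
leadership.SetupLivenessEndpoint()              // registers /live on the same mux
logrus.Fatal(http.ListenAndServe(":9090", nil)) // one port for both, per #381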

@@ -23,4 +23,8 @@ var (
IsArgoRollouts = "false"
// ReloadStrategy Specify the update strategy
ReloadStrategy = constants.EnvVarsReloadStrategy
// ReloadOnCreate adds support to watch create events
ReloadOnCreate = "false"
// EnableHA adds support for running multiple replicas via leadership election
EnableHA = false
)

View File
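
EnableHA gates whether controllers run directly or only after winning leadership, while ReloadOnCreate presumably gates handling of create events in the controller (its consumer is not part of this excerpt). A hedged sketch of the HA branch, reusing the illustrative names from the leadership sketch:

// Sketch: branching on the new EnableHA option (illustrative fragment).
if options.EnableHA {
	// RunLeaderElection starts the controllers once this replica leads.
	RunLeaderElection(lock, ctx, cancel, podName, controllers)
} else {
	for _, c := range controllers {
		c := c
		go c.Run(1, make(chan struct{}))
	}
	select {} // block forever; real wiring would handle shutdown
}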

@@ -16,11 +16,13 @@ import (
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -34,6 +36,19 @@ var (
SecretResourceType = "secrets"
)
var (
Clients = kube.GetClients()
Pod = "test-reloader-" + RandSeq(5)
Namespace = "test-reloader-" + RandSeq(5)
ConfigmapNamePrefix = "testconfigmap-reloader"
SecretNamePrefix = "testsecret-reloader"
Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
Collectors = metrics.NewCollectors()
SleepDuration = 3 * time.Second
)
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
@@ -600,7 +615,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
return last.Hash
}
//ConvertResourceToSHA generates SHA from secret or configmap data
// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
if resourceType == SecretResourceType {
@@ -822,7 +837,6 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName stri
// RandSeq generates a random sequence
func RandSeq(n int) string {
rand.Seed(time.Now().UnixNano())
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
@@ -835,10 +849,15 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
accessor, err := meta.Accessor(i)
if err != nil {
return false
}
annotations := accessor.GetAnnotations()
// match statefulsets with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
annotationValue := annotations[config.Annotation]
searchAnnotationValue := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
if err == nil && reloaderEnabled {
@@ -874,10 +893,15 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
accessor, err := meta.Accessor(i)
if err != nil {
return false
}
annotations := accessor.GetAnnotations()
// match statefulsets with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
annotationValue := annotations[config.Annotation]
searchAnnotationValue := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
if err == nil && reloaderEnabled {

View File

@@ -54,6 +54,8 @@ func GetSHAfromSecret(data map[string][]byte) string {
type List []string
type Map map[string]string
func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {

View File

@@ -1,14 +1,17 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
capabilities:
add:
- SYS_PTRACE
volumes:
- /go/pkg/
- /root/.cache/go-build/
sync:
- .:/app
forward:
- 2345:2345
dev:
reloader-reloader:
image: okteto/golang:1
command: bash
volumes:
- /go/pkg/
- /root/.cache/go-build/
sync:
- .:/app
forward:
- 2345:2345
workdir: /app
autocreate: true
securityContext:
capabilities:
add:
- SYS_PTRACE