Compare commits

...

116 Commits

Author SHA1 Message Date
stakater-user
503776c2a5 Bump Version to v0.0.40 2019-08-27 07:51:00 +00:00
Ali Kahoot
4c7afbcb71 Merge pull request #90 from stakater/update-manifests
[update-manifests] update values manifest
2019-08-27 12:40:17 +05:00
irti
b11514ec13 [update-manifests] update values.yaml.tmlp manifest 2019-08-27 12:39:03 +05:00
irti
ef7d98f5e3 [update-manifests] update values manifest 2019-08-27 11:36:59 +05:00
Ali Kahoot
a336304fb3 Merge pull request #85 from stakater/fix-issue-81
[fix-issue-81] remove reloader binary file
2019-08-21 12:08:19 +05:00
irti
65365e5fef [fix-issue-81] remove reloader binary file 2019-08-21 11:39:21 +05:00
stakater-user
7c2121b7c9 Bump Version to v0.0.39 2019-08-20 09:59:32 +00:00
Ali Kahoot
008f4b0fd2 Merge pull request #80 from stakater/fix-issue-79
fix-issue-79
2019-08-20 14:47:41 +05:00
irti
0bf2fc66cc [fix-issue-79] add reloader-name template in _helpers.tpl 2019-08-20 14:20:40 +05:00
irti
d599340549 [fix-issue-79] remove reloader-name template and replace it with reloader-fullname 2019-08-20 10:28:35 +05:00
irti
83a711885c [fix-issue-79] add nodeSelector, affinity and tolerations in deployment manifest 2019-08-13 18:14:51 +05:00
irti
0ddfe8406d [fix-issue-79] fix for default value issue for service account 2019-08-13 17:58:53 +05:00
stakater-user
036882b588 Bump Version to v0.0.38 2019-08-05 08:09:38 +00:00
Irtiza Ali
63619e360c Merge pull request #78 from stakater/fix-openshift-rbac
Add openshift flag to explicitly add rbac for openshift if desired
2019-08-05 12:58:55 +05:00
waseem
6d54f9faca Added brackets to isOpenshift 2019-08-05 09:45:04 +02:00
waseem
18254666a6 Add openshift flag to explicitly add rbac for openshift 2019-08-05 09:20:37 +02:00
Ali Kahoot
b0dcb4c5e2 Merge pull request #76 from stakater/update-readme
update-readme-by-adding-guideline-to-ignore-resource
2019-07-11 11:19:25 +05:00
irti
cd4a4fb324 [update-readme] update README.md file by fixing issue in table template 2019-07-10 11:16:09 +05:00
irti
7ae3a4259e [update-readme] update README.md file by adding not about ignore parameters 2019-07-10 11:14:30 +05:00
irti
e82926a9f6 [update-readme] update README.md file by adding the guideline on how ignore some resource configMaps and secrets 2019-07-10 10:35:20 +05:00
stakater-user
e462f7ab26 Bump Version to v0.0.37 2019-07-08 10:21:39 +00:00
Waseem Hassan
da4ffc9432 Merge pull request #74 from stakater/deploymentconfigs
Add support for DeploymentConfigs for openshift
2019-07-08 12:10:45 +02:00
waseem
cc65a1c039 Fix conflicts 2019-07-08 10:38:46 +02:00
waseem
8c1a9317ee Fix conflicts 2019-07-08 10:38:19 +02:00
Waseem Hassan
33d1918d71 Merge branch 'master' into deploymentconfigs 2019-07-08 10:35:23 +02:00
stakater-user
5fc34e885e Bump Version to v0.0.36 2019-07-08 08:28:55 +00:00
Ali Kahoot
acc61d504f Merge pull request #75 from stakater/fix-deployment-template
fix-deployment-template
2019-07-08 13:05:40 +05:00
irti
cf9c0fc685 [fix-deployment-template] fix deployment template. Custom annotations check causing the ignore-resources field addition to fail 2019-07-08 12:32:19 +05:00
waseem
4822dbae86 Remove GetClient from testutil 2019-07-05 12:30:11 +02:00
waseem
16d75d1d47 Fix merge conflicts 2019-07-05 12:24:46 +02:00
waseem
878bc5c442 Refactor Openshift detection 2019-07-05 12:23:14 +02:00
stakater-user
e7ccc40035 Bump Version to v0.0.35 2019-07-05 05:38:24 +00:00
Irtiza Ali
0197dec568 Merge pull request #72 from henryaj/master
Optionally disable secret checking
2019-07-04 22:40:36 +05:00
hstanley
795de2399b Fail if user tries to ignore all resource types 2019-07-04 10:21:19 +01:00
hstanley
a69674ba4c Remove errant 'resources-to-watch' flags 2019-07-04 10:03:14 +01:00
waseem
ff5f28ba00 Add comments where needed 2019-07-04 11:00:52 +02:00
waseem
df777332a9 Lint go 2019-07-04 10:56:17 +02:00
waseem
daca09e65e Add deploymentconfigs to clusterrole 2019-07-04 10:53:19 +02:00
waseem
50a908a59f Update minor version 2019-07-04 10:42:30 +02:00
waseem
121a550da5 Update readme 2019-07-04 10:42:06 +02:00
waseem
9f3c8379a6 Add IsOpenshift flag 2019-07-04 10:32:17 +02:00
waseem
9b48d320be Add check for openshift 2019-07-04 10:25:22 +02:00
waseem
a8edefcdde Fix install command 2019-07-04 10:21:35 +02:00
waseem
1689a9560b Add deploymentconfigs in rbac if platform is openshift 2019-07-04 10:16:48 +02:00
waseem
4f8377de15 Fix glide command in docker build 2019-07-04 10:10:26 +02:00
waseem
765ddbdf43 Update go version 2019-07-04 09:59:35 +02:00
waseem
181477de05 Update glide with openshift packages 2019-07-04 09:59:20 +02:00
waseem
6d8c0cf6cb Add controller tests and fixes 2019-07-04 09:58:58 +02:00
waseem
2dc7d20a37 Fix upgrade test cases 2019-07-04 09:58:49 +02:00
waseem
9cd5b87dab Add openshift apps client 2019-07-04 09:58:39 +02:00
waseem
20d88e0668 Fix test cases 2019-07-04 09:58:33 +02:00
waseem
de77785d4f Add support for DeploymentConfigs 2019-07-04 09:58:26 +02:00
waseem
bf6cb73fd7 Merge branch 'master' of github.com:stakater/Reloader into deploymentconfigs 2019-07-04 08:32:20 +02:00
stakater-user
b3d3c3704a Bump Version to v0.0.34 2019-07-04 06:29:52 +00:00
Ali Kahoot
58514e8610 Merge pull request #73 from stakater/fix-dnsPolicy-issue
fix-dnsPolicy-issue
2019-07-04 11:06:41 +05:00
irti
a26f7fc4ad [fix-dnsPolicy-issue] bump package version in glide.yaml file. k8s.io/api version 1.8.0 to 1.10.0 and client go version 5.0.0 to 6.0.0 2019-07-04 10:12:55 +05:00
waseem
e06394c940 Reduce wait times and fix indentations 2019-07-03 22:17:36 +02:00
waseem
362ea70e26 Fix test cases 2019-07-03 22:14:36 +02:00
hstanley
3e6c4a3f60 Switch from list of resources to monitor to list of resources to ignore 2019-07-03 16:01:15 +01:00
hstanley
8cf105726f Update helm charts (deployments and RBAC) 2019-07-03 10:56:09 +01:00
hstanley
361bea4373 move List to util.go 2019-07-03 10:32:33 +01:00
hstanley
59fd71d15f Add 'resources-to-watch' flag 2019-07-02 14:09:27 +01:00
hstanley
6c6776f2b4 Optionally disable checking k8s Secrets 2019-07-02 11:51:14 +01:00
stakater-user
8b824ef26a Bump Version to v0.0.33 2019-07-01 12:09:23 +00:00
Irtiza Ali
5fd170a7ca Merge pull request #69 from chrisns/kustomize
add Kustomize deployment code docs
2019-07-01 16:44:50 +05:00
Chris Nesbitt-Smith
69487f6caf update readme for kustomize 2019-06-27 14:34:07 +01:00
Chris Nesbitt-Smith
401a94bd36 add kustomization.yaml 2019-06-27 10:59:02 +01:00
stakater-user
bf12cbec15 Bump Version to v0.0.32 2019-06-27 08:30:31 +00:00
Irtiza Ali
fdc223a4a6 Merge pull request #68 from DevotedHealth/affinity_tolerations
Permit passing resources, tolerations, affinity to Helm chart
2019-06-27 13:07:34 +05:00
Steve Huff
dcbc0e0de0 Permit passing resources, tolerations, affinity to Helm chart
These values are all empty by default.
2019-06-26 18:44:39 +00:00
stakater-user
aff377718c Bump Version to v0.0.31 2019-06-25 09:27:27 +00:00
Ali Kahoot
112e8ba89d Merge pull request #67 from stakater/add-node-selector-support
add-node-selector-support
2019-06-25 14:04:40 +05:00
irti
c2e6231a46 [add-node-selector-support] revert README.md doc change 2019-06-20 12:21:35 +05:00
irti
51b42dc098 [add-node-selector-support] add char in README.md 2019-06-20 12:18:31 +05:00
irti
43200e127a [add-node-selector-support] add nodeSelector param in values.yaml file 2019-06-20 12:03:37 +05:00
irti
6db5106f85 [add-node-selector-support] fix issue 2019-06-20 11:55:21 +05:00
irti
703c0ea56e [add-node-selector-support] add nodeSelector support 2019-06-20 11:36:12 +05:00
stakater-user
21563abc07 Bump Version to v0.0.30 2019-06-11 07:36:19 +00:00
Ali Kahoot
ce96eb3810 Merge pull request #65 from blurpy/filesys_and_role
Fix issue with readOnlyRootFilesystem and statefulsets permissions
2019-06-11 12:13:13 +05:00
Christian Ihle
b5c8ee2ab9 Add optional support for readOnlyRootFilesystem 2019-06-06 11:59:42 +02:00
Christian Ihle
c27bb3929b Split permissions for apps and extensions apigroups since statefulsets does not have extensions 2019-06-06 11:54:14 +02:00
stakater-user
67913c9985 Bump Version to v0.0.29 2019-05-23 10:04:32 +00:00
Ali Kahoot
9dac1a30b6 Merge pull request #64 from cko/alpine_update
Update alpine container base image to 3.9
2019-05-23 14:41:30 +05:00
Christine Koppelt
ac7f9d09cc update alpine container base image to 3.9 2019-05-23 10:55:06 +02:00
stakater-user
d8ae3c76da Bump Version to v0.0.28 2019-05-08 10:22:39 +00:00
Ali Kahoot
3f115618cc Merge pull request #62 from stakater/issue-61
Initial implementation of fix for issue 61
2019-05-08 14:59:43 +05:00
faizanahmad055
06aa382910 Initial implementation of fix for issue 61
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-05-07 10:51:58 +02:00
stakater-user
3b69599c77 Bump Version to v0.0.27 2019-04-25 10:18:02 +00:00
Ali Kahoot
125e7536af Merge pull request #60 from stakater/fix-config-change-logs-issue
fix-config-change-logs-issue
2019-04-25 14:55:07 +05:00
irti
66f9b07817 [fix-config-change-logs-issue] fixed log statement placement 2019-04-25 14:19:52 +05:00
irti
40aa9955cd [fix-config-change-logs-issue] fixed the issue in create.go file 2019-04-25 14:16:42 +05:00
irti
bfff7104aa [fix-config-change-logs-issue] logs issue fixed 2019-04-25 14:11:31 +05:00
Ali Kahoot
c0acfd0503 Merge pull request #58 from stakater/sealed-secret-doc
Sealed secret doc
2019-03-18 20:18:11 +05:00
faizanahmad055
379b6c0131 Add doc for how to use sealed secrets with reloader
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-03-18 16:14:47 +01:00
faizanahmad055
3bf427e985 Add doc for how to use sealed secrets with reloader
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-03-18 16:13:55 +01:00
stakater-user
2b6e5455dc Bump Version to v0.0.26 2019-02-20 10:57:07 +00:00
Ali Kahoot
9bc8d6b67d Merge pull request #56 from stakater/add-initContainer-support
Initial implementation for init container support
2019-02-20 15:32:44 +05:00
faizanahmad055
0c340fcb48 Fix same name configmap and secret conflict
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-18 12:09:53 +01:00
faizanahmad055
889b16718a Add test cases
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-18 10:20:52 +01:00
faizanahmad055
512cbd8c85 Fix incorrect container update
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-16 16:52:12 +01:00
faizanahmad055
39944497f3 Implement golang-ci comment
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-16 15:50:47 +01:00
faizanahmad055
6fb1266637 Initial implementation for init container support
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-16 15:43:56 +01:00
stakater-user
724cda887e Bump Version to v0.0.25 2019-02-12 08:28:39 +00:00
Muhammad Ahsan
d29f3716b2 Merge pull request #53 from stakater/fix-helper-func
append reloader- to serviceAccountName
2019-02-12 13:03:43 +05:00
kahootali
2131f0ebf3 append reloader- to serviceAccountName 2019-02-12 12:20:28 +05:00
stakater-user
0f052162a2 Bump Version to v0.0.24 2019-02-08 10:55:07 +00:00
Faizan Ahmad
02c6de97c8 Merge pull request #50 from ParticleDecay/feat/annotation-flag
Parameterize all annotations
2019-02-08 11:31:38 +01:00
kahootali
16bce16f81 add custom annotation 2019-02-08 15:05:18 +05:00
Joey Espinosa
33443ccb29 feat: add cli flags to helm chart 2019-02-06 12:46:17 -05:00
Joey Espinosa
75b00733bf feat: parameterize all annotations 2019-02-04 12:33:53 -05:00
stakater-user
d2335f8ffd Bump Version to v0.0.23 2019-02-01 12:21:59 +00:00
Ali Kahoot
87f3a32f68 Merge pull request #49 from stakater/fix-48
Add support for envFrom for autoUpdate
2019-02-01 16:57:46 +05:00
faizanahmad055
95bd5e497f Use single container loop for envs
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-02-01 12:06:18 +01:00
faizanahmad055
333957d82a Implemented golang CI comment
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2019-01-31 16:32:47 +01:00
faizanahmad055
75f67ffa6e Add support for envFrom for autoUpdate
Signed-off-by: faizanahmad055 <faizanahmad@217-212-164-26.customer.telia.com>
2019-01-31 16:03:53 +01:00
Ziming Miao
0558fc3723 fix typo (#47) 2019-01-30 10:38:23 +05:00
43 changed files with 1600 additions and 689 deletions

.dockerignore (new file)

@@ -0,0 +1 @@
vendor


@@ -1 +1 @@
v0.0.22
v0.0.40


@@ -20,7 +20,7 @@ LDFLAGS =
default: build test
install:
"$(GLIDECMD)" install
"$(GLIDECMD)" install --strip-vendor
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"


@@ -13,15 +13,15 @@
## Problem
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `Deployment`, `Deamonset` and `Statefulset`
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset` and `Statefulset`
## Solution
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `Deployments`, `Deamonsets` and `Statefulsets`.
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` and `Statefulsets`.
## How to use Reloader
For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or `Secret` called `foo-secret` or both. Then add this annotation to main metadata of your `Deployment`
For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or `Secret` called `foo-secret` or both. Then add your annotation (by default `reloader.stakater.com/auto`) to main metadata of your `Deployment`
```yaml
kind: Deployment
@@ -36,7 +36,7 @@ spec:
This will discover deployments/daemonsets/statefulset automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
To do this either set `reloader.stakater.com/auto: "false"` or remove this annotation altogather, and use annotations mentioned [here](#Configmap) or [here](#Secret)
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
### Configmap
@@ -94,8 +94,13 @@ spec:
metadata:
```
### NOTE
`reloader.stakater.com/auto: "true"` will always override when use with either `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation.
### NOTES
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
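As a minimal sketch (an assumed excerpt of the Reloader `Deployment`, not part of this changeset; the `reloader.example.com/*` names are placeholders), the overrides can be passed as container args:
```yaml
# Assumed excerpt: spec.template.spec.containers[0] of the Reloader Deployment,
# overriding the default annotation names via command-line flags.
- name: reloader
  image: stakater/reloader:v0.0.40
  args:
    - "--auto-annotation"
    - "reloader.example.com/auto"        # placeholder custom annotation
    - "--configmap-annotation"
    - "reloader.example.com/configmap"   # placeholder custom annotation
    - "--secret-annotation"
    - "reloader.example.com/secret"      # placeholder custom annotation
```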
## Deploying to Kubernetes
@@ -109,7 +114,41 @@ You can apply vanilla manifests by running the following command
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```
By default Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
By default Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :
| Args | Description |
|---|---|
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |
`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the Reloader pods down to `0`.
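For instance, a minimal sketch of the relevant args block (an assumed excerpt, not part of this changeset) that makes Reloader ignore `configMaps`:
```yaml
# Assumed excerpt: spec.template.spec.containers[0].args of the Reloader Deployment
args:
  - "--resources-to-ignore=configMaps"
```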
### Vanilla kustomize
You can also apply the vanilla manifests by running the following command
```bash
kubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes
```
Similar to the vanilla manifests, Reloader gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.
### Kustomize
You can write your own `kustomization.yaml` using ours as a 'base' and write patches to tweak the configuration.
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: reloader-
bases:
- https://github.com/stakater/Reloader/deployments/kubernetes
namespace: reloader
```
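Assuming the overlay above is saved locally as `./reloader-overlay/kustomization.yaml` (the directory name is only an example), it can then be applied with:
```bash
kubectl apply -k ./reloader-overlay
```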
### Helm Charts
@@ -123,12 +162,23 @@ helm repo update
helm install stakater/reloader
```
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Deamonsets` and `Statefulsets` in `test` namespace.
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.
```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
```
Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:
| Parameter | Description | Type |
|---|---|---|
| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid value are either `true` or `false` | boolean |
`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in helm template compilation.
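For example, a sketch of an install that ignores `secrets` (remember that only one of the two parameters may be enabled at a time):
```bash
helm install stakater/reloader --set reloader.ignoreSecrets=true
```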
## Help
### Documentation


@@ -1,4 +1,4 @@
FROM stakater/go-glide:1.9.3
FROM stakater/go-glide:1.12.6
MAINTAINER "Stakater Team"
RUN apk update
@@ -11,7 +11,7 @@ RUN apk -v --update \
ADD . "$GOPATH/src/github.com/stakater/Reloader"
RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
glide update && \
glide install --strip-vendor && \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /


@@ -1,4 +1,4 @@
FROM alpine:3.4
FROM alpine:3.9
MAINTAINER "Stakater Team"
RUN apk add --update ca-certificates

Binary file not shown.


@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.22
appVersion: v0.0.22
version: v0.0.40
appVersion: v0.0.40
keywords:
- Reloader
- kubernetes


@@ -2,6 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "reloader-name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" | lower -}}
{{- end -}}
@@ -16,7 +17,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}
{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-name" . }}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
@@ -25,7 +26,7 @@ heritage: {{ .Release.Service | quote }}
{{/*
Create the name of the service account to use
*/}}
{{- define "serviceAccountName" -}}
{{- define "reloader-serviceAccountName" -}}
{{- if .Values.reloader.serviceAccount.create -}}
{{ default (include "reloader-fullname" .) .Values.reloader.serviceAccount.name }}
{{- else -}}


@@ -10,20 +10,35 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-name" . }}-role
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
resources:
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
- secrets
{{- end }}
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
- configmaps
{{- end }}
verbs:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
resources:
- deploymentconfigs
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "extensions"
- "apps"
resources:
- deployments
@@ -34,4 +49,14 @@ rules:
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
{{- end }}


@@ -10,14 +10,14 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-name" . }}-role-binding
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "reloader-name" . }}-role
name: {{ template "reloader-fullname" . }}-role
subjects:
- kind: ServiceAccount
name: {{ template "serviceAccountName" . }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.reloader.deployment.annotations }}
@@ -13,13 +13,13 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-name" . }}
name: {{ template "reloader-fullname" . }}
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: {{ template "reloader-name" . }}
app: {{ template "reloader-fullname" . }}
release: {{ .Release.Name | quote }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
@@ -35,6 +35,18 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
{{- end }}
spec:
{{- if .Values.reloader.deployment.nodeSelector }}
nodeSelector:
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.affinity }}
affinity:
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
containers:
- env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
@@ -69,5 +81,43 @@ spec:
{{- end }}
image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-name" . }}
serviceAccountName: {{ template "serviceAccountName" . }}
name: {{ template "reloader-fullname" . }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
args:
{{- if .Values.reloader.ignoreSecrets }}
- "--resources-to-ignore=secrets"
{{- end }}
{{- if eq .Values.reloader.ignoreConfigMaps true }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
- "{{ .Values.reloader.custom_annotations.configmap }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.secret }}
- "--secret-annotation"
- "{{ .Values.reloader.custom_annotations.secret }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.auto }}
- "--auto-annotation"
- "{{ .Values.reloader.custom_annotations.auto }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}
{{- end }}
serviceAccountName: {{ template "reloader-serviceAccountName" . }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumes:
- emptyDir: {}
name: tmp-volume
{{- end }}


@@ -10,20 +10,35 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-name" . }}-role
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
resources:
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
- secrets
{{- end }}
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
- configmaps
{{- end }}
verbs:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
resources:
- deploymentconfigs
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "extensions"
- "apps"
resources:
- deployments
@@ -34,4 +49,14 @@ rules:
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
{{- end }}


@@ -10,14 +10,14 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-name" . }}-role-binding
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "reloader-name" . }}-role
name: {{ template "reloader-fullname" . }}-role
subjects:
- kind: ServiceAccount
name: {{ template "serviceAccountName" . }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -10,5 +10,5 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "serviceAccountName" . }}
name: {{ template "reloader-serviceAccountName" . }}
{{- end }}


@@ -4,17 +4,44 @@ kubernetes:
host: https://kubernetes.default
reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
matchLabels: {}
deployment:
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# An affinity stanza to be applied to the Deployment.
# Example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "node-role.kubernetes.io/infra-worker"
# operator: "Exists"
affinity: {}
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
# - key: "node-role.kubernetes.io/infra-worker"
# operator: "Exists"
# effect: "NoSchedule"
tolerations: []
annotations: {}
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.22
version: v0.0.40
image:
name: stakater/reloader
tag: "v0.0.22"
tag: "v0.0.40"
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -24,6 +51,18 @@ reloader:
secret:
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# Specify resource requests/limits for the deployment.
# Example:
# resources:
# limits:
# cpu: "100m"
# memory: "512Mi"
# requests:
# cpu: "10m"
# memory: "128Mi"
resources: {}
rbac:
enabled: true
labels: {}
@@ -34,4 +73,10 @@ reloader:
labels: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name: reloader
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}


@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- manifests/clusterrole.yaml
- manifests/clusterrolebinding.yaml
- manifests/role.yaml
- manifests/rolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml


@@ -5,16 +5,16 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
name: RELEASE-NAME-reloader-role
namespace: default
rules:
- apiGroups:
- ""
resources:
resources:
- secrets
- configmaps
verbs:
@@ -22,7 +22,6 @@ rules:
- get
- watch
- apiGroups:
- "extensions"
- "apps"
resources:
- deployments
@@ -33,4 +32,14 @@ rules:
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch


@@ -5,18 +5,18 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding
name: RELEASE-NAME-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-role
name: RELEASE-NAME-reloader-role
subjects:
- kind: ServiceAccount
name: reloader
name: RELEASE-NAME-reloader
namespace: default


@@ -1,41 +1,42 @@
---
# Source: reloader/templates/deployment.yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.22
version: v0.0.40
name: reloader
name: RELEASE-NAME-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader
app: RELEASE-NAME-reloader
release: "RELEASE-NAME"
template:
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.22
version: v0.0.40
spec:
containers:
- env:
image: "stakater/reloader:v0.0.22"
image: "stakater/reloader:v0.0.40"
imagePullPolicy: IfNotPresent
name: reloader
serviceAccountName: reloader
name: RELEASE-NAME-reloader
args:
serviceAccountName: RELEASE-NAME-reloader


@@ -1,73 +0,0 @@
---
# Source: reloader/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- ""
- "extensions"
- "apps"
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-role
subjects:
- kind: ServiceAccount
name: reloader
namespace: default


@@ -5,9 +5,9 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
name: RELEASE-NAME-reloader


@@ -4,44 +4,45 @@
---
# Source: reloader/templates/deployment.yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.22
version: v0.0.40
name: reloader
name: RELEASE-NAME-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader
app: RELEASE-NAME-reloader
release: "RELEASE-NAME"
template:
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.22
version: v0.0.40
spec:
containers:
- env:
image: "stakater/reloader:v0.0.22"
image: "stakater/reloader:v0.0.40"
imagePullPolicy: IfNotPresent
name: reloader
serviceAccountName: reloader
name: RELEASE-NAME-reloader
args:
serviceAccountName: RELEASE-NAME-reloader
---
# Source: reloader/templates/clusterrole.yaml
@@ -50,16 +51,16 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
name: RELEASE-NAME-reloader-role
namespace: default
rules:
- apiGroups:
- ""
resources:
resources:
- secrets
- configmaps
verbs:
@@ -67,7 +68,6 @@ rules:
- get
- watch
- apiGroups:
- "extensions"
- "apps"
resources:
- deployments
@@ -78,84 +78,21 @@ rules:
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- ""
- "extensions"
- "apps"
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: reloader
group: com.stakater.platform
provider: stakater
version: 0.0.18
chart: "reloader-0.0.18"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-role
subjects:
- kind: ServiceAccount
name: reloader
namespace: default
---
# Source: reloader/templates/clusterrolebinding.yaml
@@ -163,19 +100,19 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader-role-binding
name: RELEASE-NAME-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-role
name: RELEASE-NAME-reloader-role
subjects:
- kind: ServiceAccount
name: reloader
name: RELEASE-NAME-reloader
namespace: default
---
@@ -185,9 +122,9 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: reloader
chart: "reloader-v0.0.22"
app: RELEASE-NAME-reloader
chart: "reloader-v0.0.40"
release: "RELEASE-NAME"
heritage: "Tiller"
name: reloader
name: RELEASE-NAME-reloader


@@ -4,9 +4,36 @@ kubernetes:
host: https://kubernetes.default
reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
matchLabels: {}
deployment:
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# An affinity stanza to be applied to the Deployment.
# Example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "node-role.kubernetes.io/infra-worker"
# operator: "Exists"
affinity: {}
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
# - key: "node-role.kubernetes.io/infra-worker"
# operator: "Exists"
# effect: "NoSchedule"
tolerations: []
annotations: {}
labels:
provider: stakater
@@ -24,6 +51,18 @@ reloader:
secret:
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# Specify resource requests/limits for the deployment.
# Example:
# resources:
# limits:
# cpu: "100m"
# memory: "512Mi"
# requests:
# cpu: "10m"
# memory: "128Mi"
resources: {}
rbac:
enabled: true
labels: {}
@@ -34,4 +73,10 @@ reloader:
labels: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name: reloader
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}


@@ -1,6 +1,6 @@
# How it works?
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Deamonset` and `Statefulset`.
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Daemonset` and `Statefulset`.
## How change detection works
@@ -17,23 +17,25 @@ The annotation value is comma separated list of `configmaps` or `secrets`. If a
### Annotation for Configmap
For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation to your `Deployment`
For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation* to your `Deployment`
```yaml
metadata:
annotations:
configmap.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--configmap-annotation` flag</small>
### Annotation for Secret
For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation to your `Deployment`
For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation* to your `Deployment`
```yaml
metadata:
annotations:
secret.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>
The above mentioned annotations also work for `Daemonsets` and `Statefulsets`
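For example, a minimal sketch of the same annotation on a `StatefulSet` named `foo` (only the relevant metadata is shown):
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: foo
  annotations:
    secret.reloader.stakater.com/reload: "foo"
```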


@@ -8,4 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
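To illustrate the convention from the table above (an assumed rendering, not output captured from Reloader): after a change to a `ConfigMap` named `foo`, the patched container spec carries an environment variable along these lines:
```yaml
env:
  - name: STAKATER_FOO_CONFIGMAP
    value: "da39a3ee5e6b4b0d3255bfef95601890afd80709"  # illustrative 40-character SHA1 of the new data
```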


@@ -25,4 +25,16 @@ Reloader supports deployment rollout as well as daemonsets and statefulsets roll
k8s-trigger-controller stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`
#### Reloader:
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`
### Customization
#### k8s-trigger-controller:
k8s-trigger-controller restricts you to using the `trigger.k8s.io/[secret-configMap]-NAME-last-hash` annotation
#### Reloader:
Reloader allows you to customize the annotation to fit your needs with command line flags:
- `--auto-annotation <annotation>`
- `--configmap-annotation <annotation>`
- `--secret-annotation <annotation>`


@@ -0,0 +1,11 @@
Below are the steps to use reloader with Sealed Secrets.
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets).
2. Install the controller for sealed secrets
3. Fetch the encryption certificate
4. Encrypt the secret.
5. Apply the secret.
7. Install the tool which uses that sealed secret.
8. Install Reloader.
9. Once everything is setup, update the original secret at client and encrypt it with kubeseal to see reloader working.
10. Apply the updated sealed secret.
11. Reloader will restart the pod to use that updated secret.
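A rough sketch of steps 3-5 and 10 using the `kubeseal` CLI (file names are examples; only commonly known kubeseal flags are used, but verify them against the sealed-secrets release you install):
```bash
# 3. Fetch the encryption certificate from the sealed-secrets controller
kubeseal --fetch-cert > cert.pem

# 4. Encrypt a plain Secret manifest into a SealedSecret
kubeseal --cert cert.pem < my-secret.yaml > my-sealed-secret.json

# 5. / 10. Apply the (updated) sealed secret; the controller unseals it and
#          Reloader then restarts the pods that use the underlying Secret
kubectl apply -f my-sealed-secret.json
```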

glide.lock (generated)

@@ -1,26 +1,12 @@
hash: b6fe060028bdb1249ba2413746476c2550b267eeab3c166c36a86e000a8dd354
updated: 2018-07-24T21:12:43.027181463+05:00
hash: 0a37eeebda95f7ac050377c5b8ca8a6f7ab051ef66ba1752471090157e6a6ea2
updated: 2019-07-03T21:04:13.576837+02:00
imports:
- name: github.com/davecgh/go-spew
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
subpackages:
- spew
- name: github.com/emicklei/go-restful
version: ff4f55a206334ef123e4f79bbf348980da81ca46
subpackages:
- log
- name: github.com/emicklei/go-restful-swagger12
version: dcef7f55730566d41eae5db10e7d6981829720f6
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- name: github.com/go-openapi/spec
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- name: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
- name: github.com/gogo/protobuf
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
subpackages:
@@ -29,7 +15,7 @@ imports:
- name: github.com/golang/glog
version: 44145f04b68cf362d9c4df2182967c2275eaefed
- name: github.com/golang/protobuf
version: 4bd1920723d7b7c925de087aa32e2187708897f7
version: b4deda0973fb4c70b50d226b1af49f3da59f5265
subpackages:
- proto
- ptypes
@@ -54,32 +40,34 @@ imports:
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
subpackages:
- simplelru
- name: github.com/howeyc/gopass
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
- name: github.com/imdario/mergo
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/json-iterator/go
version: 36b14963da70d11297d313183d7e6388c8510e1e
- name: github.com/juju/ratelimit
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
- name: github.com/mailru/easyjson
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
- name: github.com/modern-go/concurrent
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
- name: github.com/modern-go/reflect2
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
- name: github.com/openshift/api
version: d5b34b957e91dbf64013a866951c3ed5770db0b5
subpackages:
- buffer
- jlexer
- jwriter
- apps/v1
- name: github.com/openshift/client-go
version: 431ec9a26e5021f35fa41ee9a89842db9bfdb370
subpackages:
- apps/clientset/versioned
- apps/clientset/versioned/scheme
- apps/clientset/versioned/typed/apps/v1
- name: github.com/openshift/library-go
version: 0b8367a4679859036c27a30dbe010d76409e7075
- name: github.com/peterbourgon/diskv
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
- name: github.com/PuerkitoBio/purell
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
- name: github.com/PuerkitoBio/urlesc
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
- name: github.com/sirupsen/logrus
version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc
- name: github.com/spf13/cobra
version: ef82de70bb3f60c65fb8eebacbb2d122ef517385
version: f62e98d28ab7ad31d707ba837a966378465c7b57
- name: github.com/spf13/pflag
version: 583c0c0531f06d5278b7d917446061adc344b5cd
- name: golang.org/x/crypto
@@ -95,32 +83,31 @@ imports:
- idna
- lex/httplex
- name: golang.org/x/sys
version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
version: 95c6576299259db960f6c5b9b69ea52422860fce
subpackages:
- unix
- windows
- name: golang.org/x/text
version: b19bf474d317b857955b12035d2c5acb57ce8b01
subpackages:
- cases
- internal
- internal/tag
- language
- runes
- secure/bidirule
- secure/precis
- transform
- unicode/bidi
- unicode/norm
- width
- name: golang.org/x/time
version: f51c12702a4d776e4c1fa9b0fabab841babae631
subpackages:
- rate
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
version: 670d4cfef0544295bc27a114dbac37980d83185a
- name: k8s.io/api
version: fe29995db37613b9c5b2a647544cf627bfa8d299
version: 2d6f90ab1293a1fb871cf149423ebb72aa7423aa
subpackages:
- admissionregistration/v1alpha1
- admissionregistration/v1beta1
- apps/v1
- apps/v1beta1
- apps/v1beta2
- authentication/v1
@@ -134,6 +121,7 @@ imports:
- batch/v2alpha1
- certificates/v1beta1
- core/v1
- events/v1beta1
- extensions/v1beta1
- networking/v1
- policy/v1beta1
@@ -141,23 +129,23 @@ imports:
- rbac/v1alpha1
- rbac/v1beta1
- scheduling/v1alpha1
- scheduling/v1beta1
- settings/v1alpha1
- storage/v1
- storage/v1alpha1
- storage/v1beta1
- name: k8s.io/apimachinery
version: 019ae5ada31de202164b118aee88ee2d14075c31
version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae
subpackages:
- pkg/api/equality
- pkg/api/errors
- pkg/api/meta
- pkg/api/resource
- pkg/apis/meta/internalversion
- pkg/apis/meta/v1
- pkg/apis/meta/v1/unstructured
- pkg/apis/meta/v1alpha1
- pkg/apis/meta/v1beta1
- pkg/conversion
- pkg/conversion/queryparams
- pkg/conversion/unstructured
- pkg/fields
- pkg/labels
- pkg/runtime
@@ -177,18 +165,21 @@ imports:
- pkg/util/framer
- pkg/util/intstr
- pkg/util/json
- pkg/util/mergepatch
- pkg/util/net
- pkg/util/runtime
- pkg/util/sets
- pkg/util/strategicpatch
- pkg/util/validation
- pkg/util/validation/field
- pkg/util/wait
- pkg/util/yaml
- pkg/version
- pkg/watch
- third_party/forked/golang/json
- third_party/forked/golang/reflect
- name: k8s.io/client-go
version: 35874c597fed17ca62cd197e516d7d5ff9a2958c
version: 59698c7d9724b0f95f9dc9e7f7dfdcc3dfeceb82
subpackages:
- discovery
- discovery/fake
@@ -197,6 +188,10 @@ imports:
- kubernetes/scheme
- kubernetes/typed/admissionregistration/v1alpha1
- kubernetes/typed/admissionregistration/v1alpha1/fake
- kubernetes/typed/admissionregistration/v1beta1
- kubernetes/typed/admissionregistration/v1beta1/fake
- kubernetes/typed/apps/v1
- kubernetes/typed/apps/v1/fake
- kubernetes/typed/apps/v1beta1
- kubernetes/typed/apps/v1beta1/fake
- kubernetes/typed/apps/v1beta2
@@ -223,6 +218,8 @@ imports:
- kubernetes/typed/certificates/v1beta1/fake
- kubernetes/typed/core/v1
- kubernetes/typed/core/v1/fake
- kubernetes/typed/events/v1beta1
- kubernetes/typed/events/v1beta1/fake
- kubernetes/typed/extensions/v1beta1
- kubernetes/typed/extensions/v1beta1/fake
- kubernetes/typed/networking/v1
@@ -237,13 +234,21 @@ imports:
- kubernetes/typed/rbac/v1beta1/fake
- kubernetes/typed/scheduling/v1alpha1
- kubernetes/typed/scheduling/v1alpha1/fake
- kubernetes/typed/scheduling/v1beta1
- kubernetes/typed/scheduling/v1beta1/fake
- kubernetes/typed/settings/v1alpha1
- kubernetes/typed/settings/v1alpha1/fake
- kubernetes/typed/storage/v1
- kubernetes/typed/storage/v1/fake
- kubernetes/typed/storage/v1alpha1
- kubernetes/typed/storage/v1alpha1/fake
- kubernetes/typed/storage/v1beta1
- kubernetes/typed/storage/v1beta1/fake
- pkg/apis/clientauthentication
- pkg/apis/clientauthentication/v1alpha1
- pkg/apis/clientauthentication/v1beta1
- pkg/version
- plugin/pkg/client/auth/exec
- rest
- rest/watch
- testing
@@ -257,13 +262,16 @@ imports:
- tools/pager
- tools/reference
- transport
- util/buffer
- util/cert
- util/connrotation
- util/flowcontrol
- util/homedir
- util/integer
- util/retry
- util/workqueue
- name: k8s.io/kube-openapi
version: 868f2f29720b192240e18284659231b440f9cda5
version: 91cfa479c814065e420cee7ed227db0f63a5854e
subpackages:
- pkg/common
- pkg/util/proto
testImports: []


@@ -1,14 +1,10 @@
package: github.com/stakater/Reloader
import:
- package: k8s.io/api
version: kubernetes-1.8.0
- package: k8s.io/apimachinery
version: kubernetes-1.8.0
- package: k8s.io/client-go
version: 5.0.0
- package: github.com/openshift/client-go
version: release-3.11
- package: github.com/spf13/cobra
version: 0.0.3
- package: github.com/spf13/pflag
version: 1.0.1
- package: github.com/sirupsen/logrus
version: 1.0.5
version: f62e98d28ab7ad31d707ba837a966378465c7b57
- package: github.com/openshift/library-go
version: release-3.11
- package: github.com/openshift/api
version: master


@@ -3,61 +3,76 @@ package callbacks
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
apps_v1beta1 "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
openshiftv1 "github.com/openshift/api/apps/v1"
)
//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kubernetes.Interface, string) []interface{}
type ItemsFunc func(kube.Clients, string) []interface{}
//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container
//InitContainersFunc is a generic func to return containers
type InitContainersFunc func(interface{}) []v1.Container
//VolumesFunc is a generic func to return volumes
type VolumesFunc func(interface{}) []v1.Volume
//UpdateFunc performs the resource update
type UpdateFunc func(kubernetes.Interface, string, interface{}) error
type UpdateFunc func(kube.Clients, string, interface{}) error
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
ItemsFunc ItemsFunc
ContainersFunc ContainersFunc
UpdateFunc UpdateFunc
VolumesFunc VolumesFunc
ResourceType string
ItemsFunc ItemsFunc
ContainersFunc ContainersFunc
InitContainersFunc InitContainersFunc
UpdateFunc UpdateFunc
VolumesFunc VolumesFunc
ResourceType string
}
// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(client kubernetes.Interface, namespace string) []interface{} {
deployments, err := client.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
deployments, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deployments %v", err)
}
return util.InterfaceSlice(deployments.Items)
}
// GetDaemonSetItems returns the daemonSet in given namespace
func GetDaemonSetItems(client kubernetes.Interface, namespace string) []interface{} {
daemonSets, err := client.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
daemonSets, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list daemonSets %v", err)
}
return util.InterfaceSlice(daemonSets.Items)
}
// GetStatefulSetItems returns the statefulSet in given namespace
func GetStatefulSetItems(client kubernetes.Interface, namespace string) []interface{} {
statefulSets, err := client.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
statefulSets, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list statefulSets %v", err)
}
return util.InterfaceSlice(statefulSets.Items)
}
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
deploymentConfigs, err := clients.OpenshiftAppsClient.Apps().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
}
return util.InterfaceSlice(deploymentConfigs.Items)
}
// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
return item.(v1beta1.Deployment).Spec.Template.Spec.Containers
@@ -73,24 +88,56 @@ func GetStatefulsetContainers(item interface{}) []v1.Container {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Containers
}
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}
// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
return item.(v1beta1.Deployment).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonset
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
return item.(v1beta1.DaemonSet).Spec.Template.Spec.InitContainers
}
// GetStatefulsetInitContainers returns the containers of given statefulSet
func GetStatefulsetInitContainers(item interface{}) []v1.Container {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.InitContainers
}
// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(client kubernetes.Interface, namespace string, resource interface{}) error {
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
deployment := resource.(v1beta1.Deployment)
_, err := client.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
_, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(client kubernetes.Interface, namespace string, resource interface{}) error {
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
daemonSet := resource.(v1beta1.DaemonSet)
_, err := client.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
_, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
return err
}
// UpdateStatefulset performs rolling upgrade on statefulSet
func UpdateStatefulset(client kubernetes.Interface, namespace string, resource interface{}) error {
func UpdateStatefulset(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(apps_v1beta1.StatefulSet)
_, err := client.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
_, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
return err
}
@@ -108,3 +155,8 @@ func GetDaemonSetVolumes(item interface{}) []v1.Volume {
func GetStatefulsetVolumes(item interface{}) []v1.Volume {
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Volumes
}
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}
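The ItemsFunc callbacks above all funnel their typed lists through util.InterfaceSlice. One plausible, reflection-based sketch of that helper follows; the real implementation lives in internal/pkg/util and may be written differently.
// Sketch only: a reflection-based InterfaceSlice, converting a typed slice
// (e.g. []v1beta1.Deployment) into []interface{} for the generic callbacks above.
package util

import "reflect"

func InterfaceSlice(slice interface{}) []interface{} {
	s := reflect.ValueOf(slice)
	if s.Kind() != reflect.Slice {
		panic("InterfaceSlice() given a non-slice type")
	}
	ret := make([]interface{}, s.Len())
	for i := 0; i < s.Len(); i++ {
		ret[i] = s.Index(i).Interface()
	}
	return ret
}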

View File

@@ -6,8 +6,10 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewReloaderCommand starts the reloader controller
@@ -17,10 +19,20 @@ func NewReloaderCommand() *cobra.Command {
Short: "A watcher for your Kubernetes cluster",
Run: startReloader,
}
// options
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in configmaps or secrets automatically")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
return cmd
}
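startReloader below declares ignoreList as a util.List and calls ignoreList.Contains(k). A minimal sketch of such a type, assuming it is simply a named []string with a membership helper, is shown here; the actual definition in internal/pkg/util may differ.
// Sketch only: util.List as used by startReloader below.
package util

// List is a slice of strings with a convenience membership check.
type List []string

// Contains reports whether the list holds the given value.
func (l List) Contains(value string) bool {
	for _, v := range l {
		if v == value {
			return true
		}
	}
	return false
}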
func startReloader(cmd *cobra.Command, args []string) {
var ignoreList util.List
var err error
logrus.Info("Starting Reloader")
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
if len(currentNamespace) == 0 {
@@ -29,12 +41,31 @@ func startReloader(cmd *cobra.Command, args []string) {
}
// create the clientset
clientset, err := kube.GetClient()
clientset, err := kube.GetKubernetesClient()
if err != nil {
logrus.Fatal(err)
}
ignoreList, err = cmd.Flags().GetStringSlice("resources-to-ignore")
if err != nil {
logrus.Fatal(err)
}
for _, v := range ignoreList {
if v != "configMaps" && v != "secrets" {
logrus.Fatalf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
}
}
if len(ignoreList) > 1 {
logrus.Fatal("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
}
for k := range kube.ResourceMap {
if ignoreList.Contains(k) {
continue
}
c, err := controller.NewController(clientset, k, currentNamespace)
if err != nil {
logrus.Fatalf("%s", err)

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
// ResourceCreatedHandler contains new objects
@@ -17,7 +17,6 @@ func (r ResourceCreatedHandler) Handle() error {
logrus.Errorf("Resource creation handler received nil resource")
} else {
config, _ := r.GetConfig()
logrus.Infof("Resource '%s' of type '%s' in namespace '%s' has been created", config.ResourceName, config.Type, config.Namespace)
// process resource based on its type
doRollingUpgrade(config)
}

View File

@@ -3,7 +3,7 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
// ResourceUpdatedHandler contains updated objects
@@ -19,7 +19,6 @@ func (r ResourceUpdatedHandler) Handle() error {
} else {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
// process resource based on its type
doRollingUpgrade(config)
}

View File

@@ -7,97 +7,114 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/api/core/v1"
)
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentItems,
ContainersFunc: callbacks.GetDeploymentContainers,
UpdateFunc: callbacks.UpdateDeployment,
VolumesFunc: callbacks.GetDeploymentVolumes,
ResourceType: "Deployment",
ItemsFunc: callbacks.GetDeploymentItems,
ContainersFunc: callbacks.GetDeploymentContainers,
InitContainersFunc: callbacks.GetDeploymentInitContainers,
UpdateFunc: callbacks.UpdateDeployment,
VolumesFunc: callbacks.GetDeploymentVolumes,
ResourceType: "Deployment",
}
}
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDaemonSetItems,
ContainersFunc: callbacks.GetDaemonSetContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
VolumesFunc: callbacks.GetDaemonSetVolumes,
ResourceType: "DaemonSet",
ItemsFunc: callbacks.GetDaemonSetItems,
ContainersFunc: callbacks.GetDaemonSetContainers,
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
VolumesFunc: callbacks.GetDaemonSetVolumes,
ResourceType: "DaemonSet",
}
}
// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
UpdateFunc: callbacks.UpdateStatefulset,
VolumesFunc: callbacks.GetStatefulsetVolumes,
ResourceType: "StatefulSet",
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
InitContainersFunc: callbacks.GetStatefulsetInitContainers,
UpdateFunc: callbacks.UpdateStatefulset,
VolumesFunc: callbacks.GetStatefulsetVolumes,
ResourceType: "StatefulSet",
}
}
// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentConfigItems,
ContainersFunc: callbacks.GetDeploymentConfigContainers,
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
UpdateFunc: callbacks.UpdateDeploymentConfig,
VolumesFunc: callbacks.GetDeploymentConfigVolumes,
ResourceType: "DeploymentConfig",
}
}
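All four constructors above populate a callbacks.RollingUpgradeFuncs value, now including the new InitContainersFunc field. A sketch of how that struct could be declared, with signatures inferred from the callbacks it holds, is shown below; the authoritative definition lives in internal/pkg/callbacks.
// Sketch only: RollingUpgradeFuncs with the new InitContainersFunc field.
// Signatures are inferred from the callbacks referenced above; the real
// definition may use named function types instead.
package callbacks

import (
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
)

// RollingUpgradeFuncs bundles the per-workload callbacks used by PerformRollingUpgrade.
type RollingUpgradeFuncs struct {
	ItemsFunc          func(kube.Clients, string) []interface{}
	ContainersFunc     func(interface{}) []v1.Container
	InitContainersFunc func(interface{}) []v1.Container
	UpdateFunc         func(kube.Clients, string, interface{}) error
	VolumesFunc        func(interface{}) []v1.Volume
	ResourceType       string
}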
func doRollingUpgrade(config util.Config) {
rollingUpgrade(config, GetDeploymentRollingUpgradeFuncs())
rollingUpgrade(config, GetDaemonSetRollingUpgradeFuncs())
rollingUpgrade(config, GetStatefulSetRollingUpgradeFuncs())
clients := kube.GetClients()
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs())
if kube.IsOpenshift {
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs())
}
}
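doRollingUpgrade now obtains its clients from kube.GetClients() and consults kube.IsOpenshift before touching DeploymentConfigs. A hypothetical sketch of that helper follows, assuming it exits fatally on error (it returns no error value to the caller) and that a GetOpenshiftAppsClient helper exists; only GetKubernetesClient and IsOpenshift appear elsewhere in this diff, so the real implementation in pkg/kube may differ.
// Hypothetical sketch: GetClients as consumed by doRollingUpgrade above.
// GetOpenshiftAppsClient is an assumed helper and may not match the real code.
func GetClients() Clients {
	kubernetesClient, err := GetKubernetesClient()
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
	clients := Clients{KubernetesClient: kubernetesClient}
	if IsOpenshift {
		openshiftClient, err := GetOpenshiftAppsClient() // assumed helper
		if err != nil {
			logrus.Fatalf("Unable to create Openshift Apps client error = %v", err)
		}
		clients.OpenshiftAppsClient = openshiftClient
	}
	return clients
}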
func rollingUpgrade(config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
client, err := kube.GetClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
err = PerformRollingUpgrade(client, config, upgradeFuncs)
err := PerformRollingUpgrade(clients, config, upgradeFuncs)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
items := upgradeFuncs.ItemsFunc(client, config.Namespace)
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
var err error
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
volumes := upgradeFuncs.VolumesFunc(i)
// find correct annotation and update the resource
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[constants.ReloaderAutoAnnotation]
if len(containers) > 0 {
resourceName := util.ToObjectMeta(i).Name
result := constants.NotUpdated
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
if err == nil && reloaderEnabled {
result = updateContainers(volumes, containers, config.ResourceName, config)
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
if value == config.ResourceName {
result = updateContainers(volumes, containers, value, config)
if result == constants.Updated {
break
}
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
result := constants.NotUpdated
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
if err == nil && reloaderEnabled {
result = updateContainers(upgradeFuncs, i, config, true)
}
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
if value == config.ResourceName {
result = updateContainers(upgradeFuncs, i, config, false)
if result == constants.Updated {
break
}
}
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(client, config.Namespace, i)
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
} else {
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
}
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
resourceName := util.ToObjectMeta(i).Name
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
}
}
}
@@ -115,34 +132,12 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
return ""
}
func getContainerToUpdate(volumes []v1.Volume, containers []v1.Container, envarPostfix string, volumeName string) *v1.Container {
// Get the volumeMountName to find volumeMount in container
if len(volumes) > 0 {
volumeMountName := getVolumeMountName(volumes, envarPostfix, volumeName)
// Get the container with mounted configmap/secret
if volumeMountName != "" {
for i := range containers {
volumeMounts := containers[i].VolumeMounts
for j := range volumeMounts {
if volumeMounts[j].Name == volumeMountName {
return &containers[i]
}
}
}
}
}
// Get the container with referenced secret or configmap
func getContainerWithVolumeMount(containers []v1.Container, volumeMountName string) *v1.Container {
for i := range containers {
envs := containers[i].Env
for j := range envs {
envVarSource := envs[j].ValueFrom
if envVarSource != nil {
if envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == volumeName {
return &containers[i]
} else if envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == volumeName {
return &containers[i]
}
volumeMounts := containers[i].VolumeMounts
for j := range volumeMounts {
if volumeMounts[j].Name == volumeMountName {
return &containers[i]
}
}
}
@@ -150,17 +145,82 @@ func getContainerToUpdate(volumes []v1.Volume, containers []v1.Container, envarP
return nil
}
func updateContainers(volumes []v1.Volume, containers []v1.Container, annotationValue string, config util.Config) constants.Result {
func getContainerWithEnvReference(containers []v1.Container, resourceName string, resourceType string) *v1.Container {
for i := range containers {
envs := containers[i].Env
for j := range envs {
envVarSource := envs[j].ValueFrom
if envVarSource != nil {
if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
return &containers[i]
} else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
return &containers[i]
}
}
}
envsFrom := containers[i].EnvFrom
for j := range envsFrom {
if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
return &containers[i]
} else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
return &containers[i]
}
}
}
return nil
}
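To make concrete what getContainerWithEnvReference matches, the illustrative container below (all names invented for the example, v1 being k8s.io/api/core/v1 as imported above) would be returned for a configmap called "app-config", whether it is referenced through an Env entry's ValueFrom.ConfigMapKeyRef or through an EnvFrom ConfigMapRef.
// Illustrative only: a container that getContainerWithEnvReference would match
// for config.ResourceName == "app-config" and config.Type == constants.ConfigmapEnvVarPostfix.
var exampleContainer = v1.Container{
	Name:  "app",
	Image: "tutum/hello-world",
	Env: []v1.EnvVar{
		{
			Name: "APP_MODE",
			ValueFrom: &v1.EnvVarSource{
				ConfigMapKeyRef: &v1.ConfigMapKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
					Key:                  "mode",
				},
			},
		},
	},
	EnvFrom: []v1.EnvFromSource{
		{
			ConfigMapRef: &v1.ConfigMapEnvSource{
				LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
			},
		},
	},
}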
func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
volumes := upgradeFuncs.VolumesFunc(item)
containers := upgradeFuncs.ContainersFunc(item)
initContainers := upgradeFuncs.InitContainersFunc(item)
var container *v1.Container
// Get the volumeMountName to find volumeMount in container
volumeMountName := getVolumeMountName(volumes, config.Type, config.ResourceName)
// Get the container with mounted configmap/secret
if volumeMountName != "" {
container = getContainerWithVolumeMount(containers, volumeMountName)
if container == nil && len(initContainers) > 0 {
container = getContainerWithVolumeMount(initContainers, volumeMountName)
if container != nil {
// if the configmap/secret is used in an init container, return the first pod container so the reloader env var can be stored on it
return &containers[0]
}
} else if container != nil {
return container
}
}
// Get the container with referenced secret or configmap as env var
container = getContainerWithEnvReference(containers, config.ResourceName, config.Type)
if container == nil && len(initContainers) > 0 {
container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
if container != nil {
// if the configmap/secret is used in an init container, return the first pod container so the reloader env var can be stored on it
return &containers[0]
}
}
// Fall back to the first container if the annotation explicitly names the specified configmap or secret, i.e. configmap.reloader.stakater.com/reload
if container == nil && !autoReload {
return &containers[0]
}
return container
}
func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + "_" + config.Type
container := getContainerToUpdate(volumes, containers, config.Type, config.ResourceName)
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
if container == nil {
return constants.NoContainerFound
}
// update the env var if it already exists
result = updateEnvVar(containers, envar, config.SHAValue)
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
// if no env var exists yet, create one
if result == constants.NoEnvVarFound {

View File

@@ -7,24 +7,32 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
testclient "k8s.io/client-go/kubernetes/fake"
)
var (
client = testclient.NewSimpleClientset()
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(3)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
// Creating namespace
testutil.CreateNamespace(namespace, client)
testutil.CreateNamespace(namespace, clients.KubernetesClient)
logrus.Infof("Setting up the test resources")
setup()
@@ -40,97 +48,166 @@ func TestMain(m *testing.M) {
func setup() {
// Creating configmap
_, err := testutil.CreateConfigMap(client, namespace, configmapName, "www.google.com")
_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
_, err = testutil.CreateSecret(client, namespace, secretName, data)
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(client, namespace, configmapWithEnvName, "www.google.com")
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
_, err = testutil.CreateSecret(client, namespace, secretWithEnvName, data)
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithEnvName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvFromName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithInitEnv, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithInitContainer, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithEnvFromName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithInitEnv, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithInitContainer, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(client, configmapName, namespace, true)
_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, configmapWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with secret mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with configmap mounted as Env in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, configmapWithInitEnv, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with secret mounted as Env in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitEnv, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret
_, err = testutil.CreateDeployment(client, secretName, namespace, true)
_, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with env var source as configmap
_, err = testutil.CreateDeployment(client, configmapWithEnvName, namespace, false)
_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err)
}
// Creating Deployment with env var source as secret
_, err = testutil.CreateDeployment(client, secretWithEnvName, namespace, false)
_, err = testutil.CreateDeployment(clients.KubernetesClient, secretWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err)
}
// Creating Deployment with envFrom source as configmap
_, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, configmapWithEnvFromName, namespace)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating Deployment with envFrom source as secret
_, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, secretWithEnvFromName, namespace)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(client, configmapName, namespace, true)
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
}
// Creating DaemonSet with secret
_, err = testutil.CreateDaemonSet(client, secretName, namespace, true)
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with env var source as configmap
_, err = testutil.CreateDaemonSet(client, configmapWithEnvName, namespace, false)
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err)
}
// Creating DaemonSet with env var source as secret
_, err = testutil.CreateDaemonSet(client, secretWithEnvName, namespace, false)
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err)
}
// Creating StatefulSet with configmap
_, err = testutil.CreateStatefulSet(client, configmapName, namespace, true)
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with secret
_, err = testutil.CreateStatefulSet(client, secretName, namespace, true)
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
}
// Creating StatefulSet with env var source as configmap
_, err = testutil.CreateStatefulSet(client, configmapWithEnvName, namespace, false)
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err)
}
// Creating StatefulSet with env var source as secret
_, err = testutil.CreateStatefulSet(client, secretWithEnvName, namespace, false)
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretWithEnvName, namespace, false)
if err != nil {
logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err)
}
@@ -139,103 +216,175 @@ func setup() {
func teardown() {
// Deleting Deployment with configmap
deploymentError := testutil.DeleteDeployment(client, namespace, configmapName)
deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret
deploymentError = testutil.DeleteDeployment(client, namespace, secretName)
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
}
// Deleting Deployment with configmap as env var source
deploymentError = testutil.DeleteDeployment(client, namespace, configmapWithEnvName)
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError)
}
// Deleting Deployment with secret
deploymentError = testutil.DeleteDeployment(client, namespace, secretWithEnvName)
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithEnvName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError)
}
// Deleting Deployment with configmap mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError)
}
// Deleting Deployment with secret mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError)
}
// Deleting Deployment with configmap mounted as env in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithInitEnv)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError)
}
// Deleting Deployment with secret mounted as env in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithInitEnv)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError)
}
// Deleting Deployment with configmap as envFrom source
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvFromName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError)
}
// Deleting Deployment with secret as envFrom source
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithEnvFromName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
}
// Deleting DaemonSet with configmap
daemonSetError := testutil.DeleteDaemonSet(client, namespace, configmapName)
daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
}
// Deleting DaemonSet with secret
daemonSetError = testutil.DeleteDaemonSet(client, namespace, secretName)
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting DaemonSet with configmap as env var source
daemonSetError = testutil.DeleteDaemonSet(client, namespace, configmapWithEnvName)
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError)
}
// Deleting DaemonSet with secret as env var source
daemonSetError = testutil.DeleteDaemonSet(client, namespace, secretWithEnvName)
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretWithEnvName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError)
}
// Deleting StatefulSet with configmap
statefulSetError := testutil.DeleteStatefulSet(client, namespace, configmapName)
statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
}
// Deleting StatefulSet with secret
statefulSetError = testutil.DeleteStatefulSet(client, namespace, secretName)
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap as env var source
statefulSetError = testutil.DeleteStatefulSet(client, namespace, configmapWithEnvName)
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError)
}
// Deleting StatefulSet with secret as env var source
statefulSetError = testutil.DeleteStatefulSet(client, namespace, secretWithEnvName)
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretWithEnvName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
// Deleting Configmap
err := testutil.DeleteConfigMap(client, namespace, configmapName)
err := testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Secret
err = testutil.DeleteSecret(client, namespace, secretName)
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting Configmap used as env var source
err = testutil.DeleteConfigMap(client, namespace, configmapWithEnvName)
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
if err != nil {
logrus.Errorf("Error while deleting the configmap used as env var source %v", err)
}
// Deleting Secret used as env var source
err = testutil.DeleteSecret(client, namespace, secretWithEnvName)
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithEnvName)
if err != nil {
logrus.Errorf("Error while deleting the secret used as env var source %v", err)
}
// Deleting Configmap used in init container
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the configmap used in init container %v", err)
}
// Deleting Secret used in init container
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret used in init container %v", err)
}
// Deleting Configmap used as envFrom source
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvFromName)
if err != nil {
logrus.Errorf("Error while deleting the configmap used as envFrom source %v", err)
}
// Deleting Secret used as envFrom source
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithEnvFromName)
if err != nil {
logrus.Errorf("Error while deleting the secret used as envFrom source %v", err)
}
// Deleting Configmap used as env var source in init container
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithInitEnv)
if err != nil {
logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err)
}
// Deleting Secret used as env var source in init container
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithInitEnv)
if err != nil {
logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err)
}
// Deleting namespace
testutil.DeleteNamespace(namespace, client)
testutil.DeleteNamespace(namespace, clients.KubernetesClient)
}
@@ -247,22 +396,39 @@ func getConfigWithAnnotations(resourceType string, name string, shaData string,
Annotation: annotation,
Type: resourceType,
}
}
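These tests build a util.Config via getConfigWithAnnotations and hand it to PerformRollingUpgrade. A sketch of what that struct plausibly contains, with field names inferred from usage throughout this diff (config.Namespace, config.ResourceName, config.SHAValue, config.Annotation, config.Type), is shown below; the real struct lives in internal/pkg/util.
// Sketch only: util.Config as inferred from its usage in these tests and in PerformRollingUpgrade.
package util

// Config describes the changed configmap/secret that may trigger a rolling upgrade.
type Config struct {
	Namespace    string // namespace of the changed resource
	ResourceName string // name of the changed configmap or secret
	SHAValue     string // SHA of the new data, written into the reloader env var
	Annotation   string // workload annotation to match, e.g. configmap.reloader.stakater.com/reload
	Type         string // resource type postfix, e.g. constants.ConfigmapEnvVarPostfix
}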
func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, constants.ConfigmapUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
@@ -270,17 +436,53 @@ func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, constants.ReloaderAutoAnnotation)
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitEnv, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvFromName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
@@ -288,17 +490,35 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, constants.SecretUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithSecretInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
@@ -306,17 +526,53 @@ func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, constants.ReloaderAutoAnnotation)
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
@@ -324,17 +580,17 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, constants.ConfigmapUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
@@ -342,17 +598,17 @@ func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, constants.ReloaderAutoAnnotation)
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
@@ -360,17 +616,17 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, constants.SecretUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
@@ -378,17 +634,17 @@ func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, constants.ConfigmapUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}
@@ -396,17 +652,17 @@ func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, constants.SecretUpdateOnChangeAnnotation)
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
err := PerformRollingUpgrade(client, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}

View File

@@ -1,6 +1,6 @@
package constants
package options
const (
var (
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets

View File

@@ -7,14 +7,17 @@ import (
"strings"
"time"
openshiftv1 "github.com/openshift/api/apps/v1"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1_beta1 "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -29,14 +32,6 @@ var (
SecretResourceType = "secrets"
)
func GetClient() *kubernetes.Clientset {
newClient, err := kube.GetClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
return newClient
}
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
@@ -69,12 +64,67 @@ func getObjectMeta(namespace string, name string, autoReload bool) metav1.Object
func getAnnotations(name string, autoReload bool) map[string]string {
if autoReload {
return map[string]string{
constants.ReloaderAutoAnnotation: "true"}
options.ReloaderAutoAnnotation: "true"}
}
return map[string]string{
constants.ConfigmapUpdateOnChangeAnnotation: name,
constants.SecretUpdateOnChangeAnnotation: name}
options.ConfigmapUpdateOnChangeAnnotation: name,
options.SecretUpdateOnChangeAnnotation: name}
}
func getEnvVarSources(name string) []v1.EnvFromSource {
return []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
}
}
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
{
Name: "secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
}
}
func getVolumeMounts(name string) []v1.VolumeMount {
return []v1.VolumeMount{
{
MountPath: "etc/config",
Name: "configmap",
},
{
MountPath: "etc/sec",
Name: "secret",
},
}
}
func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec {
@@ -121,6 +171,23 @@ func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec {
}
}
func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
EnvFrom: getEnvVarSources(name),
},
},
},
}
}
func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -137,34 +204,65 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
Value: "test",
},
},
VolumeMounts: []v1.VolumeMount{
VolumeMounts: getVolumeMounts(name),
},
},
Volumes: getVolumes(name),
},
}
}
func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Image: "busybox",
Name: "busyBox",
VolumeMounts: getVolumeMounts(name),
},
},
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
MountPath: "etc/config",
Name: "configmap",
},
{
MountPath: "etc/sec",
Name: "secret",
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
Volumes: []v1.Volume{
Volumes: getVolumes(name),
},
}
}
func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
Image: "busybox",
Name: "busyBox",
EnvFrom: getEnvVarSources(name),
},
},
Containers: []v1.Container{
{
Name: "secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
@@ -188,6 +286,52 @@ func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment
}
}
// GetDeploymentConfig provides deploymentConfig for testing
func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithVolume,
},
}
}
// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *v1beta1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: v1beta1.DeploymentSpec{
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainer(deploymentName),
},
}
}
// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *v1beta1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: v1beta1.DeploymentSpec{
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName),
},
}
}
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *v1beta1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
@@ -202,6 +346,35 @@ func GetDeploymentWithEnvVars(namespace string, deploymentName string) *v1beta1.
}
}
func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithEnvVars,
},
}
}
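// GetDeploymentWithEnvVarSources provides deployment with env var sources for testing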
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *v1beta1.Deployment {
replicaset := int32(1)
return &v1beta1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: v1beta1.DeploymentSpec{
Replicas: &replicaset,
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
}
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
return &v1beta1.DaemonSet{
@@ -337,7 +510,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
logrus.Infof("Creating configmap")
configmapClient := client.CoreV1().ConfigMaps(namespace)
_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
time.Sleep(3 * time.Second)
return configmapClient, err
}
@@ -346,7 +519,7 @@ func CreateSecret(client kubernetes.Interface, namespace string, secretName stri
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
_, err := secretClient.Create(GetSecret(namespace, secretName, data))
time.Sleep(3 * time.Second)
return secretClient, err
}
@@ -361,7 +534,47 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
logrus.Infof("Creating DeploymentConfig")
deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace)
var deploymentConfigObj *openshiftv1.DeploymentConfig
if volumeMount {
deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName)
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
time.Sleep(5 * time.Second)
return deploymentConfig, err
}
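For reference, a minimal sketch of how these DeploymentConfig helpers might be exercised together in a test. The internal/pkg/kube and internal/pkg/testutil import paths, the "example-app" name, and the pre-existing namespace are illustrative assumptions, not taken from this changeset:

package main

import (
	"github.com/sirupsen/logrus"

	"github.com/stakater/Reloader/internal/pkg/kube"     // assumed import path
	"github.com/stakater/Reloader/internal/pkg/testutil" // assumed import path
)

func main() {
	clients := kube.GetClients()
	if !kube.IsOpenshift || clients.OpenshiftAppsClient == nil {
		logrus.Info("skipping: DeploymentConfigs are only available on Openshift")
		return
	}

	// Assumes this namespace already exists in the cluster.
	namespace := "test-reloader-" + testutil.RandSeq(5)

	// volumeMount=true builds the pod template that mounts the configmap/secret volumes.
	dc, err := testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, "example-app", namespace, true)
	if err != nil {
		logrus.Fatalf("Unable to create DeploymentConfig, error = %v", err)
	}

	// Clean up once the assertions are done.
	if err := testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, dc.Name); err != nil {
		logrus.Errorf("Unable to delete DeploymentConfig, error = %v", err)
	}
}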
// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*v1beta1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
var deploymentObj *v1beta1.Deployment
if volumeMount {
deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -376,7 +589,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
}
daemonset, err := daemonsetClient.Create(daemonsetObj)
time.Sleep(3 * time.Second)
return daemonset, err
}
@@ -391,7 +604,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
}
statefulset, err := statefulsetClient.Create(statefulsetObj)
time.Sleep(3 * time.Second)
return statefulset, err
}
@@ -399,15 +612,23 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
deploymentError := client.ExtensionsV1beta1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentError
}
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
logrus.Infof("Deleting DeploymentConfig")
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentConfigError
}
// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
daemonsetError := client.ExtensionsV1beta1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return daemonsetError
}
@@ -415,7 +636,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
logrus.Infof("Deleting StatefulSet %s", statefulsetName)
statefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return statefulsetError
}
@@ -429,7 +650,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
configmap = GetConfigmap(namespace, configmapName, data)
}
_, updateErr := configmapClient.Update(configmap)
time.Sleep(3 * time.Second)
return updateErr
}
@@ -443,7 +664,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
secret = GetSecret(namespace, secretName, data)
}
_, updateErr := secretClient.Update(secret)
time.Sleep(3 * time.Second)
return updateErr
}
@@ -451,7 +672,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -459,7 +680,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
logrus.Infof("Deleting secret %q.\n", secretName)
err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -474,13 +695,13 @@ func RandSeq(n int) string {
}
// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
// match resources with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
if err == nil && reloaderEnabled {

View File

@@ -2,7 +2,8 @@ package util
import (
"github.com/stakater/Reloader/internal/pkg/constants"
"k8s.io/api/core/v1"
"github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
)
// Config contains rolling upgrade configuration parameters
@@ -19,7 +20,7 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
return Config{
Namespace: configmap.Namespace,
ResourceName: configmap.Name,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
SHAValue: GetSHAfromConfigmap(configmap.Data),
Type: constants.ConfigmapEnvVarPostfix,
}
@@ -30,7 +31,7 @@ func GetSecretConfig(secret *v1.Secret) Config {
return Config{
Namespace: secret.Namespace,
ResourceName: secret.Name,
Annotation: options.SecretUpdateOnChangeAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
}
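As a rough illustration of how these constructors might be consumed, the sketch below fetches a ConfigMap with the pre-context client-go Get signature used in this era of the codebase and derives its Config; the internal/pkg/kube and internal/pkg/util import paths and the "default"/"my-config" names are assumptions:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/stakater/Reloader/internal/pkg/kube" // assumed import path
	"github.com/stakater/Reloader/internal/pkg/util" // assumed import path
)

func main() {
	clients := kube.GetClients()

	// Fetch an existing ConfigMap (hypothetical name) and build the rolling-upgrade Config from it.
	cm, err := clients.KubernetesClient.CoreV1().ConfigMaps("default").Get("my-config", metav1.GetOptions{})
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	cfg := util.GetConfigmapConfig(cm)
	fmt.Println(cfg.ResourceName, cfg.Annotation, cfg.SHAValue)
}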

View File

@@ -46,3 +46,14 @@ func GetSHAfromSecret(data map[string][]byte) string {
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}
type List []string
func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {
return true
}
}
return false
}
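A quick sketch of how the new List helper might be used; the --resources-to-ignore flag name and the util import path are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util" // assumed import path
)

func main() {
	// Hypothetical set of resource kinds to skip, e.g. parsed from a --resources-to-ignore flag.
	ignored := util.List{"configMaps"}

	fmt.Println(ignored.Contains("configMaps")) // true
	fmt.Println(ignored.Contains("secrets"))    // false
}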

View File

@@ -3,13 +3,80 @@ package kube
import (
"os"
"k8s.io/client-go/tools/clientcmd"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// Clients struct exposes interfaces for kubernetes as well as openshift if available
type Clients struct {
KubernetesClient kubernetes.Interface
OpenshiftAppsClient appsclient.Interface
}
var (
// IsOpenshift is true if the environment is Openshift and false if it is Kubernetes
IsOpenshift = isOpenshift()
)
// GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier
func GetClients() Clients {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
var appsClient *appsclient.Clientset
if IsOpenshift {
appsClient, err = GetOpenshiftAppsClient()
if err != nil {
logrus.Warnf("Unable to create Openshift Apps client error = %v", err)
}
}
return Clients{
KubernetesClient: client,
OpenshiftAppsClient: appsClient,
}
}
func isOpenshift() bool {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true
}
logrus.Info("Environment: Kubernetes")
return false
}
// GetOpenshiftAppsClient returns an Openshift Client that can query on Apps
func GetOpenshiftAppsClient() (*appsclient.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return appsclient.NewForConfig(config)
}
// GetKubernetesClient gets the client for k8s; if ~/.kube/config exists it uses that config, otherwise it falls back to the in-cluster config
func GetKubernetesClient() (*kubernetes.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}
func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
@@ -31,5 +98,6 @@ func GetClient() (*kubernetes.Clientset, error) {
if err != nil {
return nil, err
}
return config, nil
}
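Putting the new kube package together, a minimal sketch of how a caller might consume GetClients and IsOpenshift; the internal/pkg/kube import path is an assumption:

package main

import (
	"github.com/sirupsen/logrus"

	"github.com/stakater/Reloader/internal/pkg/kube" // assumed import path
)

func main() {
	clients := kube.GetClients()

	// The Kubernetes client is always populated; the Openshift apps client is only
	// populated when the project.openshift.io API group is reachable.
	version, err := clients.KubernetesClient.Discovery().ServerVersion()
	if err != nil {
		logrus.Fatalf("Unable to query server version, error = %v", err)
	}
	logrus.Infof("Connected to Kubernetes %s", version.GitVersion)

	if kube.IsOpenshift && clients.OpenshiftAppsClient != nil {
		logrus.Info("DeploymentConfigs can be reloaded via the Openshift apps client")
	}
}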

View File

@@ -1,7 +1,7 @@
package kube
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)