Compare commits

...

71 Commits

Author SHA1 Message Date
stakater-user
1cec52637f Bump Version to v0.0.74 2020-10-28 17:08:26 +00:00
Ahmed Waleed Malik
1901a4eb49 Merge pull request #146 from mnach/add-metrics-service
add metrics endpoints to kubernetes specs
2020-10-28 21:57:15 +05:00
Mikhail Vladimirovich Nacharov
710396f66e add metrics endpoints to kubernetes specs 2020-10-28 01:13:49 +05:00
stakater-user
11bafa9f36 Bump Version to v0.0.73 2020-10-27 10:13:10 +00:00
Ahmed Waleed Malik
9a45318fc9 Merge pull request #175 from stakater/fix-issue-173
Fix issue 173
2020-10-26 12:50:12 +05:00
faizanahmad055
843f47600a Fix formatting of documentation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:28:37 +01:00
faizanahmad055
3d9dee27b5 Fix formatting of documentation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:27:58 +01:00
faizanahmad055
63fd3c2635 Add documentation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:24:41 +01:00
faizanahmad055
284ca59ca4 Add annotations, labels and documentation to support helm3
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:22:05 +01:00
stakater-user
2ce24abe40 Bump Version to v0.0.72 2020-10-20 07:36:45 +00:00
Usama Ahmad
6419444663 Merge pull request #172 from stakater/fix-chart
Fix helm chart template
2020-10-20 12:26:56 +05:00
Waleed Malik
1a6fd3e302 Fix helm chart template 2020-10-20 10:44:59 +05:00
Ahmed Waleed Malik
7ac90b8c88 Merge pull request #170 from stakater/fix-issue-169
Fix #169 - Update Rbac api versions
2020-10-20 09:33:08 +05:00
faizanahmad055
faf27c2d5d Add support for legacy rbac
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 16:00:54 +02:00
faizanahmad055
6a0dfd3ce0 Update Rbac api versions
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 10:32:32 +02:00
stakater-user
fdbc3067ce Bump Version to v0.0.71 2020-10-13 03:56:54 +00:00
Ahmed Waleed Malik
c4ead210ee Merge pull request #168 from booleanbetrayal/namespaces-to-ignore_helm_support
Add Helm chart support for --namespaces-to-ignore flag
2020-10-13 08:47:22 +05:00
Brent Dearth
0441f6d481 Add Helm chart support for --namespaces-to-ignore flag 2020-10-12 15:04:08 -06:00
stakater-user
09b9a073a0 Bump Version to v0.0.70 2020-10-05 10:28:03 +00:00
Ahmed Waleed Malik
d6d188f224 Merge pull request #162 from pchico83/master
Add okteto manifest to develop Reloader directly on Kubernetes
2020-10-05 15:18:27 +05:00
stakater-user
422c291b06 Bump Version to v0.0.69 2020-09-22 13:39:09 +00:00
Júlia Biró
ed6ea026a8 Trim spaces in annotation list (#165)
* strip whitespace

* only trim spaces
2020-09-22 15:29:29 +02:00
Pablo Chico de Guzman
da30b4744b Add okteto manifest to develop Reloader directly on Kubernetes 2020-09-04 11:26:59 +02:00
stakater-user
503e357349 Bump Version to v0.0.68 2020-09-01 05:42:04 +00:00
Josh Soref
61e9202781 Spelling (#161)
* spelling: create-or
2020-09-01 10:32:16 +05:00
stakater-user
8dbe7a85af Bump Version to v0.0.67 2020-08-08 18:46:03 +00:00
Ahmad Iqbal Ali
e86f616305 update slack links in readme (#156) 2020-08-08 20:36:09 +02:00
stakater-user
0c36cfd602 Bump Version to v0.0.66 2020-08-06 18:20:25 +00:00
Faizan Ahmad
f38f86a45c Merge pull request #154 from clive-jevons/respect-configmap-binarydata-for-hash
Include data from ConfigMap.BinaryData when calculating SHA
2020-08-06 20:09:59 +02:00
Faizan Ahmad
5033b8fcdc Merge pull request #155 from kostyrev/master
Add fullnameOverride to helm chart
2020-08-06 20:09:45 +02:00
Aleksandr Kostyrev
be4285742a Add fullnameOverride to helm chart 2020-08-06 16:54:50 +03:00
Clive Jevons
6a008999f5 Include data from ConfigMap.BinaryData when calculating SHA 2020-08-06 13:37:50 +02:00
stakater-user
93f4ea240f Bump Version to v0.0.65 2020-08-04 09:17:57 +00:00
stakater-user
c6fbae2f62 Bump Version to v0.0.64 2020-08-04 08:15:47 +00:00
Ahmed Waleed Malik
3fe0ebb48a Merge pull request #152 from liuming-dev/refactor--code-polish
Polishing code
2020-08-04 13:02:17 +05:00
Ahmed Waleed Malik
67b847bf41 Merge pull request #151 from liuming-dev/style--gofmt
style: gofmt -l -w -s .
2020-08-04 13:01:44 +05:00
Liu Ming
eaa3db48f5 Polish code
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-29 10:25:55 +08:00
Liu Ming
a505d2e3b1 style: gofmt -l -w -s .
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-28 21:45:56 +08:00
stakater-user
9ec5515a39 Bump Version to v0.0.63 2020-07-20 17:56:16 +00:00
Ahmed Waleed Malik
8db17acf67 Merge pull request #150 from stakater/fix-watch-global
Fix watch global
2020-07-20 22:46:29 +05:00
faizanahmad055
b43719cf34 Remove duplicate condition
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-07-20 19:33:38 +02:00
faizanahmad055
e8216069a5 Fix issue for watch global variable 2020-07-20 18:48:58 +02:00
stakater-user
732d35e45f Bump Version to v0.0.62 2020-07-17 09:17:35 +00:00
Ahmed Waleed Malik
dcedaa2cfe Merge pull request #147 from alexconlin/patch-1
Remove empty fields from kustomize deployment
2020-07-17 14:07:01 +05:00
Alex Conlin-Oakley
8d77121c3b only include env and args in container when needed 2020-07-15 17:42:08 +01:00
Alex Conlin
013cd92219 Merge branch 'master' into patch-1 2020-07-10 17:24:33 +01:00
stakater-user
39b5be37af Bump Version to v0.0.61 2020-07-10 15:54:41 +00:00
kahootali
86c2ed265d Add non-root security context 2020-07-10 20:40:48 +05:00
LucasBoisserie
87130f06bc Run as Non Root (#149) 2020-07-10 17:34:49 +02:00
Ali Kahoot
17f702f510 Merge pull request #148 from TBBle/patch-1
Typo fix "resatart"
2020-07-10 20:27:40 +05:00
Paul "TBBle" Hampson
16f3055e10 Typo fix "resatart" 2020-07-10 11:34:47 +10:00
Alex Conlin
4800af8e28 Remove empty fields from deployment manifest 2020-07-08 11:22:43 +01:00
Alex Conlin
db79c65334 Remove empty fields from kustomize deployment
Fixes #115
2020-07-07 22:52:53 +01:00
stakater-user
d2223f313f Bump Version to v0.0.60 2020-06-23 07:15:27 +00:00
Ahmed Waleed Malik
c9dabc3a14 Merge pull request #142 from vladlosev/feat-track-secrets-configmaps-by-annotation
Adds support for auto-reloading secrets and configmaps by annotation.
2020-06-23 12:05:22 +05:00
Vlad Losev
e61f9a6bdb Adds note on incompatibility with 'reloader.stakater.com/auto'. 2020-06-21 14:15:58 -07:00
Vlad Losev
6bcec06052 Fixes missing import. 2020-06-09 19:45:12 -07:00
Vlad Losev
0988e8947f Removes unused import. 2020-06-09 19:18:12 -07:00
Vlad Losev
ff27cc0f51 Simplifies test. 2020-06-09 19:17:19 -07:00
Vlad Losev
be7d454504 Fixes test using auto annotations. 2020-06-09 19:16:04 -07:00
Vlad Losev
3131116ed6 Re-adds sleep statements. 2020-06-09 19:16:04 -07:00
Vlad Losev
965cacf1ba Updates documentation. 2020-06-09 19:16:04 -07:00
Vlad Losev
e81b49d81b Renames search annotation. 2020-06-09 19:16:03 -07:00
Vlad Losev
17f8b81110 Simplifies annotations for searching secrets for reload. 2020-06-09 19:16:03 -07:00
Vlad Losev
5980c91560 Abstracts out configmap and deployment creation. 2020-06-09 19:16:01 -07:00
Vlad Losev
fda733ea5a Adds support for auto-reloading secrets and configmaps by annotation. 2020-06-09 19:14:14 -07:00
stakater-user
732cd5b53a Bump Version to v0.0.59 2020-06-09 16:50:55 +00:00
stakater-user
aae0c5c443 Merge pull request #141 from cucxabong/support-projected-volume
Support projected volume
2020-06-09 20:16:24 +05:00
Quan Hoang
d4223311de Support projected volume 2020-06-02 23:36:08 +07:00
stakater-user
29173c7364 Bump Version to v0.0.58 2020-03-30 15:12:34 +00:00
inductor
7767809a38 Refactor Dockerfile (#133)
* refactor dockerfile

* update go version on Dockerfile
2020-03-30 16:52:33 +02:00
38 changed files with 975 additions and 121 deletions


@@ -1 +1 @@
version: v0.0.57
version: v0.0.74

.stignore (new file, 3 additions)

@@ -0,0 +1,3 @@
.git
Reloader
__debug_bin


@@ -22,6 +22,7 @@ Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades o
## Compatibility
Reloader is compatible with kubernetes >= 1.9
The `apiVersion: rbac.authorization.k8s.io/v1beta1` is deprecated since kubernetes 1.17. To run Reloader on older versions, please use the chart parameter `reloader.legacy.rbac=true`.
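On older Kubernetes clusters, that parameter is passed at install time; for example (the same command appears in the Helm Charts section below):
```bash
helm install stakater/reloader --set reloader.legacy.rbac=true
```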
## How to use Reloader
@@ -38,6 +39,41 @@ spec:
This will automatically discover deployments/daemonsets/statefulsets where `foo-configmap` or `foo-secret` is used, either via an environment variable or a volume mount, and will perform a rolling upgrade on the related pods when `foo-configmap` or `foo-secret` are updated.
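For reference, a minimal sketch of a deployment carrying this auto annotation (the annotation name matches the `--auto-annotation` default listed further below):
```yaml
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
```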
You can restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deployment/daemonset/statefulset like this:
```yaml
kind: Deployment
metadata:
annotations:
reloader.stakater.com/search: "true"
spec:
template:
```
and Reloader will trigger the rolling upgrade upon modification of any
`ConfigMap` or `Secret` annotated like this:
```yaml
kind: ConfigMap
metadata:
annotations:
reloader.stakater.com/match: "true"
data:
key: value
```
provided the secret/configmap is being used in an environment variable or a
volume mount.
Please note that `reloader.stakater.com/search` and
`reloader.stakater.com/auto` do not work together. If you have the
`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
will always restart upon a change in configmaps or secrets it uses, regardless
of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.
We can also name a specific configmap or secret, so that a rolling upgrade is triggered only when that particular configmap or secret changes, rather than on changes to every configmap or secret used in a deployment, daemonset or statefulset.
To do this, either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use the annotations mentioned [here](#Configmap) or [here](#Secret).
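A minimal sketch, reusing `foo-configmap` and `foo-secret` from the earlier example (the annotation names are the defaults shown in the flag list below):
```yaml
kind: Deployment
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
    secret.reloader.stakater.com/reload: "foo-secret"
spec:
  template:
```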
@@ -99,6 +135,8 @@ spec:
- `reloader.stakater.com/auto: "true"` will only reload the pod if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- the `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of whether that configmap or secret is in use
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the search annotation with the `--auto-search-annotation` flag
and the match annotation with the `--search-match-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag, as in the example below
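For illustration, a hypothetical invocation combining two of these overrides (the flag names match the defaults registered in `main.go` later in this diff; the values are placeholders):
```bash
./Reloader --auto-annotation="example.com/auto" --namespaces-to-ignore="kube-system,kube-public"
```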
@@ -154,20 +192,26 @@ namespace: reloader
### Helm Charts
Alternatively, if you have configured helm on your cluster, you can add reloader from our public chart repository and deploy it via helm using the commands below
Alternatively, if you have configured helm on your cluster, you can add reloader from our public chart repository and deploy it via helm using the commands below. Follow [this](docs/Helm2-to-Helm3.md) guide in case you have trouble migrating reloader from Helm2 to Helm3.
```bash
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update
helm install stakater/reloader
helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
```
**Note:** The latest version of reloader uses `apiVersion: rbac.authorization.k8s.io/v1` for rbac. The `apiVersion: rbac.authorization.k8s.io/v1beta1` is deprecated since kubernetes 1.17. To run reloader on older versions, please use the command below.
```bash
helm install stakater/reloader --set reloader.legacy.rbac=true # For helm3 add --generate-name flag or set the release name
```
**Note:** By default reloader watches all namespaces. To watch a single namespace, please run the following command. It will install reloader in the `test` namespace, and it will only watch `Deployments`, `Daemonsets` and `Statefulsets` in the `test` namespace.
```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
```
Reloader can be configured to ignore the resources `secrets` and `configmaps` using the following parameters of the `values.yaml` file:
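A sketch of those parameters, with defaults taken from the chart's `values.yaml` shown later in this diff:
```yaml
reloader:
  ignoreSecrets: false     # set to true to stop watching secrets
  ignoreConfigMaps: false  # set to true to stop watching configmaps
  ignoreNamespaces: ""     # comma separated list of namespaces to ignore
```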
@@ -195,8 +239,8 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us a
Join and talk to us on Slack for discussing Reloader
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://stakater-slack.herokuapp.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater.slack.com/messages/CC5S05S12)
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://slack.stakater.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater-community.slack.com/messages/CC5S05S12)
## Contributing
@@ -206,6 +250,11 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
### Developing
1. Deploy Reloader.
2. Run `okteto up` to activate your development container.
3. `make build`.
4. `./Reloader`
PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
1. **Fork** the repo on GitHub


@@ -1,18 +1,21 @@
FROM golang:1.13.1-alpine
MAINTAINER "Stakater Team"
RUN apk update
FROM golang:1.13.9-alpine
LABEL maintainer "Stakater Team"
RUN apk -v --update \
add git build-base && \
rm -rf /var/cache/apk/* && \
mkdir -p "$GOPATH/src/github.com/stakater/Reloader"
--no-cache \
add git build-base
ADD . "$GOPATH/src/github.com/stakater/Reloader"
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
go mod download && \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=amd64
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /


@@ -1,8 +1,14 @@
FROM alpine:3.9
MAINTAINER "Stakater Team"
FROM alpine:3.11
LABEL maintainer "Stakater Team"
RUN apk add --update ca-certificates
RUN apk add --update --no-cache ca-certificates
COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/bin/Reloader"]


@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.57
appVersion: v0.0.57
version: v0.0.74
appVersion: v0.0.74
keywords:
- Reloader
- kubernetes


@@ -12,15 +12,20 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "reloader-fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}
{{/*
@@ -33,3 +38,11 @@ Create the name of the service account to use
{{ default "default" .Values.reloader.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the annotations to support helm3
*/}}
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
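For a release named `reloader` installed into the `default` namespace, this helper renders to the annotations visible on the generated manifests later in this diff:
```yaml
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
```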


@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: ClusterRole
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}


@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: ClusterRoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}


@@ -1,8 +1,9 @@
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.reloader.deployment.annotations }}
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
{{- end }}
labels:
@@ -52,7 +53,11 @@ spec:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
containers:
- env:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
- name: {{ $name | quote }}
@@ -83,14 +88,26 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- end }}
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -98,9 +115,12 @@ spec:
{{- if .Values.reloader.ignoreSecrets }}
- "--resources-to-ignore=secrets"
{{- end }}
{{- if eq .Values.reloader.ignoreConfigMaps true }}
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
@@ -116,7 +136,7 @@ spec:
- "{{ .Values.reloader.custom_annotations.auto }}"
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}


@@ -1,7 +1,13 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: Role
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}


@@ -1,7 +1,13 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: RoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}


@@ -2,8 +2,9 @@
apiVersion: v1
kind: Service
metadata:
{{- if .Values.reloader.service.annotations }}
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
{{- end }}
labels:
@@ -21,5 +22,8 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
ports:
{{ toYaml .Values.reloader.service.ports | indent 4 }}
{{- end }}
- port: {{ .Values.reloader.service.port }}
name: http
protocol: TCP
targetPort: http
{{- end }}


@@ -5,6 +5,8 @@ kind: ServiceAccount
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
{{- end }}
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.labels }}


@@ -12,10 +12,13 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -32,6 +35,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -44,10 +51,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.57
version: v0.0.74
image:
name: stakater/reloader
tag: "v0.0.57"
tag: "v0.0.74"
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:


@@ -1,14 +1,18 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
namespace: default
rules:


@@ -1,14 +1,18 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
namespace: default
roleRef:


@@ -3,14 +3,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.57
version: v0.0.74
name: reloader-reloader
spec:
@@ -24,19 +28,34 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.57
version: v0.0.74
spec:
containers:
- env:
image: "stakater/reloader:v0.0.57"
- image: "stakater/reloader:v0.0.74"
imagePullPolicy: IfNotPresent
name: reloader-reloader
args:
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader


@@ -1,3 +1,4 @@
---
# Source: reloader/templates/service.yaml


@@ -4,10 +4,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader


@@ -1,14 +1,18 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
namespace: default
rules:
@@ -46,14 +50,18 @@ rules:
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
namespace: default
roleRef:
@@ -70,14 +78,18 @@ subjects:
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.57
version: v0.0.74
name: reloader-reloader
spec:
@@ -91,20 +103,35 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.57
version: v0.0.74
spec:
containers:
- env:
image: "stakater/reloader:v0.0.57"
- image: "stakater/reloader:v0.0.74"
imagePullPolicy: IfNotPresent
name: reloader-reloader
args:
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
readinessProbe:
httpGet:
path: /metrics
port: http
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader
---
@@ -115,19 +142,24 @@ spec:
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/service.yaml
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.57"
chart: "reloader-v0.0.74"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader
---
# Source: reloader/templates/service.yaml


@@ -12,10 +12,13 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -32,6 +35,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:

docs/Helm2-to-Helm3.md (new file, 62 additions)

@@ -0,0 +1,62 @@
# Helm2 to Helm3 Migration
Follow the instructions below to migrate reloader from Helm2 to Helm3.
## Instructions
There are 3 steps involved in migrating reloader from Helm2 to Helm3.
### Step 1:
Install the helm-2to3 plugin
```bash
helm3 plugin install https://github.com/helm/helm-2to3
helm3 2to3 convert <release-name>
helm3 2to3 cleanup --release-cleanup --skip-confirmation
```
### Step 2:
Add the following Helm3 label and annotations to the reloader resources.
Label:
```yaml
app.kubernetes.io/managed-by=Helm
```
Annotations:
```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
```
For example, to label and annotate the ClusterRoleBinding and ClusterRole:
```bash
KIND=ClusterRoleBinding
NAME=reloader-reloader-role-binding
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
KIND=ClusterRole
NAME=reloader-reloader-role
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```
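The generated manifests in this diff show the same annotations on the Deployment and ServiceAccount, so those resources need the same treatment; a hypothetical loop (the resource names follow the chart's `reloader-reloader` naming):
```bash
RELEASE=reloader
NAMESPACE=kube-system
for RES in deployment/reloader-reloader serviceaccount/reloader-reloader; do
  kubectl -n "$NAMESPACE" annotate "$RES" meta.helm.sh/release-name="$RELEASE"
  kubectl -n "$NAMESPACE" annotate "$RES" meta.helm.sh/release-namespace="$NAMESPACE"
  kubectl -n "$NAMESPACE" label "$RES" app.kubernetes.io/managed-by=Helm
done
```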
### Step 3:
Upgrade to the desired version
```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts
helm3 repo update
helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```


@@ -8,5 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. This adds difficulty for any additional updates to configmap controller, and one cannot know for sure whether new changes break any old functionality. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |


@@ -8,4 +8,4 @@ Below are the steps to use reloader with Sealed Secrets.
8. Install Reloader.
9. Once everything is set up, update the original secret at the client and encrypt it with kubeseal to see reloader working.
10. Apply the updated sealed secret.
11. Reloader will resatart the pod to use that updated secret.
11. Reloader will restart the pod to use that updated secret.
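A hypothetical sequence for steps 9 and 10 (assumes a secret named `my-secret`; exact kubectl/kubeseal flags may differ across versions):
```bash
kubectl create secret generic my-secret --from-literal=key=updated-value --dry-run=client -o json \
  | kubeseal -o yaml > my-sealed-secret.yaml
kubectl apply -f my-sealed-secret.yaml
```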


@@ -2,6 +2,6 @@
These are the key features of Reloader:
1. Restart pod in a depoloyment on change in linked/related configmaps or secrets
1. Restart pod in a deployment on change in linked/related configmaps or secrets
2. Restart pod in a daemonset on change in linked/related configmaps or secrets
3. Restart pod in a statefulset on change in linked/related configmaps or secrets


@@ -24,9 +24,11 @@ func NewReloaderCommand() *cobra.Command {
}
// options
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")


@@ -336,7 +336,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
}
// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap
func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating configmap
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -552,7 +552,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -820,7 +820,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)


@@ -33,7 +33,7 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
var oldSHAData string
var config util.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
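The signature change here pairs with commit `6a008999f5` (include `ConfigMap.BinaryData` when calculating the SHA): the helper now needs the whole object rather than just `Data`. A speculative sketch of what such a helper can look like; the actual implementation in `internal/pkg/util` may differ:
```go
package util

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"

	v1 "k8s.io/api/core/v1"
)

// GetSHAfromConfigmap hashes both Data and BinaryData so that a change in a
// binary payload also produces a new SHA and triggers a reload (sketch only).
func GetSHAfromConfigmap(cm *v1.ConfigMap) string {
	values := make([]string, 0, len(cm.Data)+len(cm.BinaryData))
	for k, v := range cm.Data {
		values = append(values, k+"="+v)
	}
	for k, v := range cm.BinaryData {
		values = append(values, k+"="+string(v))
	}
	sort.Strings(values) // map iteration order is random; sort for a stable hash
	return fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(values, ";"))))
}
```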


@@ -99,10 +99,12 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
// find correct annotation and update the resource
annotations := upgradeFuncs.AnnotationsFunc(i)
annotationValue, found := annotations[config.Annotation]
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
if !found && !foundAuto {
if !found && !foundAuto && !foundSearchAnn {
annotations = upgradeFuncs.PodAnnotationsFunc(i)
annotationValue = annotations[config.Annotation]
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
}
result := constants.NotUpdated
@@ -114,6 +116,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
result = updateContainers(upgradeFuncs, i, config, false)
if result == constants.Updated {
@@ -123,6 +126,13 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
}
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
result = updateContainers(upgradeFuncs, i, config, true)
}
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
resourceName := util.ToObjectMeta(i).Name
@@ -141,12 +151,33 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
for i := range volumes {
if mountType == constants.ConfigmapEnvVarPostfix && volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
} else if mountType == constants.SecretEnvVarPostfix && volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
if mountType == constants.ConfigmapEnvVarPostfix {
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName {
return volumes[i].Name
}
}
}
} else if mountType == constants.SecretEnvVarPostfix {
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName {
return volumes[i].Name
}
}
}
}
}
return ""
}
@@ -230,7 +261,7 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
if container == nil {
@@ -238,12 +269,12 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
}
//update if env var exists
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
// if no existing env var exists lets create one
if result == constants.NoEnvVarFound {
e := v1.EnvVar{
Name: envar,
Name: envVar,
Value: config.SHAValue,
}
container.Env = append(container.Env, e)
@@ -252,11 +283,11 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
return result
}
func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
if envs[j].Value != shaData {
envs[j].Value = shaData
return constants.Updated


@@ -15,24 +15,31 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testclient "k8s.io/client-go/kubernetes/fake"
)
var (
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
configmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
configmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
projectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
projectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
projectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
projectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
configmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
configmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
configmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
@@ -66,6 +73,30 @@ func setup() {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap will be used in projected volume
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret will be used in projected volume
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap will be used in projected volume in init containers
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret will be used in projected volume in init containers
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
@@ -127,6 +158,30 @@ func setup() {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedConfigMapWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with secret in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedSecretWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
if err != nil {
@@ -175,6 +230,17 @@ func setup() {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating Deployment with envFrom source as secret
_, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
clients.KubernetesClient,
configmapAnnotated,
namespace,
map[string]string{"reloader.stakater.com/search": "true"},
)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
@@ -187,6 +253,18 @@ func setup() {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with configmap in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
}
// Creating DaemonSet with secret in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with env var source as configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -211,6 +289,18 @@ func setup() {
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
}
// Creating StatefulSet with configmap in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with secret in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with env var source as configmap
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -249,6 +339,30 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap as env var source
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
if deploymentError != nil {
@@ -309,6 +423,12 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError)
}
// Deleting Deployment with search annotation
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapAnnotated)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError)
}
// Deleting DaemonSet with configmap
daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
if daemonSetError != nil {
@@ -321,6 +441,18 @@ func teardown() {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting DaemonSet with configmap in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
}
// Deleting Deployment with secret in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedSecretName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting Deployment with configmap as env var source
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if daemonSetError != nil {
@@ -345,6 +477,18 @@ func teardown() {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
}
// Deleting Deployment with secret in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedSecretName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap as env var source
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if statefulSetError != nil {
@@ -369,6 +513,30 @@ func teardown() {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Secret used in projected volume
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretName)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume in init containers
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Configmap used projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting Configmap used as env var source
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
if err != nil {
@@ -467,6 +635,115 @@ func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
configmapObj.Annotations = annotations
return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
}
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggers(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
time.Sleep(5 * time.Second)
if updated {
t.Errorf("Deployment was updated unexpectedly")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
t.Errorf("Counter was increased unexpectedly")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t *testing.T) {
deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
clients.KubernetesClient,
configmapAnnotated+"-different",
namespace,
map[string]string{"reloader.stakater.com/search": "true"},
)
if err != nil {
t.Errorf("Failed to create deployment with search annotation.")
}
defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if updated {
t.Errorf("Deployment was updated unexpectedly")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
t.Errorf("Counter was increased unexpectedly")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
@@ -490,6 +767,29 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
}
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
@@ -582,6 +882,29 @@ func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
}
}
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
@@ -605,6 +928,29 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
}
}
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
}
logrus.Infof("Verifying deployment update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
@@ -697,6 +1043,29 @@ func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
}
}
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
@@ -743,6 +1112,29 @@ func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
}
}
func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
}
logrus.Infof("Verifying daemonSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
@@ -766,6 +1158,29 @@ func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
}
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
@@ -789,6 +1204,29 @@ func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
}
}
func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolume(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
}
logrus.Infof("Verifying statefulSet update")
updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithPodAnnotations(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithPodAnnotations, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation)


@@ -1,12 +1,20 @@
package options
var (
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
// configmaps specified by name
ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
// secrets specified by name
SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
// ReloaderAutoAnnotation is an annotation to detect changes in secrets and configmaps
ReloaderAutoAnnotation = "reloader.stakater.com/auto"
// AutoSearchAnnotation is an annotation to detect changes in
// configmaps or secrets tagged with the SearchMatchAnnotation
AutoSearchAnnotation = "reloader.stakater.com/search"
// SearchMatchAnnotation is an annotation to tag secrets to be found with
// AutoSearchAnnotation
SearchMatchAnnotation = "reloader.stakater.com/match"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
)
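// A minimal usage sketch (not from this changeset): a caller tags a workload's
// metadata with one of the annotations above, e.g. to reload on a named
// ConfigMap ("app-config" is a hypothetical name):
//
//	deployment.Annotations = map[string]string{
//		ConfigmapUpdateOnChangeAnnotation: "app-config",
//	}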


@@ -92,6 +92,38 @@ func getEnvVarSources(name string) []v1.EnvFromSource {
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
Name: "projectedconfigmap",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "projectedsecret",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
@@ -123,6 +155,14 @@ func getVolumeMounts(name string) []v1.VolumeMount {
MountPath: "etc/sec",
Name: "secret",
},
{
MountPath: "etc/projectedconfig",
Name: "projectedconfigmap",
},
{
MountPath: "etc/projectedsec",
Name: "projectedsecret",
},
}
}
@@ -523,11 +563,11 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}
// GetResourceSHA returns the SHA value of given environment variable
func GetResourceSHA(containers []v1.Container, envVar string) string {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envVar {
return envs[j].Value
}
}
@@ -624,9 +664,10 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
@@ -636,6 +677,18 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
return deployment, err
}
// CreateDeploymentWithEnvVarSourceAndAnnotations creates a deployment with the
// given annotations in the given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) {
logrus.Infof("Creating DaemonSet")
@@ -759,6 +812,7 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
containers := upgradeFuncs.ContainersFunc(i)
// match statefulsets with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
@@ -767,11 +821,16 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
matches = true
break
}
}
} else if searchAnnotationValue == "true" {
if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
matches = true
}
}
if matches {

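// A standalone sketch (not from this changeset) of the comma-separated matching
// above, including the space-trimming this changeset introduces.
func annotationMatchesResource(annotationValue, resourceName string) bool {
	for _, value := range strings.Split(annotationValue, ",") {
		if strings.Trim(value, " ") == resourceName {
			return true
		}
	}
	return false
}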

@@ -8,31 +8,34 @@ import (
// Config contains rolling upgrade configuration parameters
type Config struct {
Namespace string
ResourceName string
ResourceAnnotations map[string]string
Annotation string
SHAValue string
Type string
}
// GetConfigmapConfig provides utility config for configmap
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
return Config{
Namespace: configmap.Namespace,
ResourceName: configmap.Name,
ResourceAnnotations: configmap.Annotations,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
SHAValue: GetSHAfromConfigmap(configmap),
Type: constants.ConfigmapEnvVarPostfix,
}
}
// GetSecretConfig provides utility config for secret
func GetSecretConfig(secret *v1.Secret) Config {
return Config{
Namespace: secret.Namespace,
ResourceName: secret.Name,
ResourceAnnotations: secret.Annotations,
Annotation: options.SecretUpdateOnChangeAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
}
}


@@ -2,10 +2,12 @@ package util
import (
"bytes"
"encoding/base64"
"sort"
"strings"
"github.com/stakater/Reloader/internal/pkg/crypto"
v1 "k8s.io/api/core/v1"
)
// ConvertToEnvVarName converts the given text into a usable env var
@@ -29,11 +31,14 @@ func ConvertToEnvVarName(text string) string {
return buffer.String()
}
func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
values := []string{}
for k, v := range configmap.Data {
values = append(values, k+"="+v)
}
for k, v := range configmap.BinaryData {
values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}
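// A minimal usage sketch (not from this changeset): BinaryData now feeds the
// hash too, base64-encoded, so a change in a binary payload alone yields a new SHA.
func exampleConfigmapSHA() string {
	cm := &v1.ConfigMap{
		Data:       map[string]string{"url": "www.stakater.com"},
		BinaryData: map[string][]byte{"cert": []byte("test")},
	}
	return GetSHAfromConfigmap(cm)
}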


@@ -2,6 +2,8 @@ package util
import (
"testing"
v1 "k8s.io/api/core/v1"
)
func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
t.Errorf("Failed to convert data into environment variable")
}
}
func TestGetHashFromConfigMap(t *testing.T) {
data := map[*v1.ConfigMap]string{
{
Data: map[string]string{"test": "test"},
}: "Only Data",
{
Data: map[string]string{"test": "test"},
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Both Data and BinaryData",
{
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Only BinaryData",
}
converted := map[string]string{}
for cm, cmName := range data {
converted[cmName] = GetSHAfromConfigmap(cm)
}
// Test that the hash for each configmap is really unique
for cmName, cmHash := range converted {
count := 0
for _, cmHash2 := range converted {
if cmHash == cmHash2 {
count++
}
}
if count > 1 {
t.Errorf("Found duplicate hashes for %v", cmName)
}
}
}
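// A sketch (not from this changeset): the quadratic duplicate scan above could
// also be written with a set for a linear uniqueness check.
func hashesAreUnique(hashes map[string]string) bool {
	seen := map[string]bool{}
	for _, h := range hashes {
		if seen[h] {
			return false
		}
		seen[h] = true
	}
	return true
}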

okteto.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
capabilities:
add:
- SYS_PTRACE
volumes:
- /go/pkg/
- /root/.cache/go-build/
sync:
- .:/app
forward:
- 2345:2345
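# Usage note (not part of the manifest): with this file at the repository root,
# running `okteto up` (assuming the Okteto CLI is installed) syncs the local
# source into a Go development container on the cluster; the 2345 forward
# matches Delve's default debugger port.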


@@ -78,7 +78,6 @@ func GetKubernetesClient() (*kubernetes.Clientset, error) {
func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
@@ -95,9 +94,6 @@ func getConfig() (*rest.Config, error) {
return nil, err
}
}
if err != nil {
return nil, err
}
return config, nil
}