Compare commits

...

95 Commits

Author SHA1 Message Date
stakater-user
11bafa9f36 Bump Version to v0.0.73 2020-10-27 10:13:10 +00:00
Ahmed Waleed Malik
9a45318fc9 Merge pull request #175 from stakater/fix-issue-173
Fix issue 173
2020-10-26 12:50:12 +05:00
faizanahmad055
843f47600a Fix formatting of documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:28:37 +01:00
faizanahmad055
3d9dee27b5 Fix formatting of documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:27:58 +01:00
faizanahmad055
63fd3c2635 Add documenation to support helm3 migration
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:24:41 +01:00
faizanahmad055
284ca59ca4 Add annotations, labels and documenation to support helm3
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-25 19:22:05 +01:00
stakater-user
2ce24abe40 Bump Version to v0.0.72 2020-10-20 07:36:45 +00:00
Usama Ahmad
6419444663 Merge pull request #172 from stakater/fix-chart
Fix helm chart template
2020-10-20 12:26:56 +05:00
Waleed Malik
1a6fd3e302 Fix helm chart template 2020-10-20 10:44:59 +05:00
Ahmed Waleed Malik
7ac90b8c88 Merge pull request #170 from stakater/fix-issue-169
Fix#169 - Update Rbac api versions
2020-10-20 09:33:08 +05:00
faizanahmad055
faf27c2d5d Add support for legacy rbac
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 16:00:54 +02:00
faizanahmad055
6a0dfd3ce0 Update Rbac api versions
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-10-19 10:32:32 +02:00
stakater-user
fdbc3067ce Bump Version to v0.0.71 2020-10-13 03:56:54 +00:00
Ahmed Waleed Malik
c4ead210ee Merge pull request #168 from booleanbetrayal/namespaces-to-ignore_helm_support
Add Helm chart support for --namespaces-to-ignore flag
2020-10-13 08:47:22 +05:00
Brent Dearth
0441f6d481 Add Helm chart support for --namespaces-to-ignore flag 2020-10-12 15:04:08 -06:00
stakater-user
09b9a073a0 Bump Version to v0.0.70 2020-10-05 10:28:03 +00:00
Ahmed Waleed Malik
d6d188f224 Merge pull request #162 from pchico83/master
Add okteto manifest to develop Reloader directly on Kubernetes
2020-10-05 15:18:27 +05:00
stakater-user
422c291b06 Bump Version to v0.0.69 2020-09-22 13:39:09 +00:00
Júlia Biró
ed6ea026a8 Trim spaces in annotation list (#165)
* strip whitespace

* only trim spaces
2020-09-22 15:29:29 +02:00
Pablo Chico de Guzman
da30b4744b Add okteto manifest to develop Reloader directly on Kubernetes 2020-09-04 11:26:59 +02:00
stakater-user
503e357349 Bump Version to v0.0.68 2020-09-01 05:42:04 +00:00
Josh Soref
61e9202781 Spelling (#161)
* spelling: create-or
2020-09-01 10:32:16 +05:00
stakater-user
8dbe7a85af Bump Version to v0.0.67 2020-08-08 18:46:03 +00:00
Ahmad Iqbal Ali
e86f616305 update slack links in readme (#156) 2020-08-08 20:36:09 +02:00
stakater-user
0c36cfd602 Bump Version to v0.0.66 2020-08-06 18:20:25 +00:00
Faizan Ahmad
f38f86a45c Merge pull request #154 from clive-jevons/respect-configmap-binarydata-for-hash
Include data from ConfigMap.BinaryData when calculating SHA
2020-08-06 20:09:59 +02:00
Faizan Ahmad
5033b8fcdc Merge pull request #155 from kostyrev/master
Add fullnameOverride to helm chart
2020-08-06 20:09:45 +02:00
Aleksandr Kostyrev
be4285742a Add fullnameOverride to helm chart 2020-08-06 16:54:50 +03:00
Clive Jevons
6a008999f5 Include data from ConfigMap.BinaryData when calculating SHA 2020-08-06 13:37:50 +02:00
stakater-user
93f4ea240f Bump Version to v0.0.65 2020-08-04 09:17:57 +00:00
stakater-user
c6fbae2f62 Bump Version to v0.0.64 2020-08-04 08:15:47 +00:00
Ahmed Waleed Malik
3fe0ebb48a Merge pull request #152 from liuming-dev/refactor--code-polish
Polishing code
2020-08-04 13:02:17 +05:00
Ahmed Waleed Malik
67b847bf41 Merge pull request #151 from liuming-dev/style--gofmt
style: gofmt -l -w -s .
2020-08-04 13:01:44 +05:00
Liu Ming
eaa3db48f5 Polish code
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-29 10:25:55 +08:00
Liu Ming
a505d2e3b1 style: gofmt -l -w -s .
Signed-off-by: Liu Ming <hit_oak_tree@126.com>
2020-07-28 21:45:56 +08:00
stakater-user
9ec5515a39 Bump Version to v0.0.63 2020-07-20 17:56:16 +00:00
Ahmed Waleed Malik
8db17acf67 Merge pull request #150 from stakater/fix-watch-global
Fix watch global
2020-07-20 22:46:29 +05:00
faizanahmad055
b43719cf34 Remove duplicate condition
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2020-07-20 19:33:38 +02:00
faizanahmad055
e8216069a5 Fix issue for watch global variable 2020-07-20 18:48:58 +02:00
stakater-user
732d35e45f Bump Version to v0.0.62 2020-07-17 09:17:35 +00:00
Ahmed Waleed Malik
dcedaa2cfe Merge pull request #147 from alexconlin/patch-1
Remove empty fields from kustomize deployment
2020-07-17 14:07:01 +05:00
Alex Conlin-Oakley
8d77121c3b only include env and args in container when needed 2020-07-15 17:42:08 +01:00
Alex Conlin
013cd92219 Merge branch 'master' into patch-1 2020-07-10 17:24:33 +01:00
stakater-user
39b5be37af Bump Version to v0.0.61 2020-07-10 15:54:41 +00:00
kahootali
86c2ed265d Add non-root security context 2020-07-10 20:40:48 +05:00
LucasBoisserie
87130f06bc Run as Non Root (#149) 2020-07-10 17:34:49 +02:00
Ali Kahoot
17f702f510 Merge pull request #148 from TBBle/patch-1
Typo fix "resatart"
2020-07-10 20:27:40 +05:00
Paul "TBBle" Hampson
16f3055e10 Typo fix "resatart" 2020-07-10 11:34:47 +10:00
Alex Conlin
4800af8e28 Remove empty fields from deployment manifest 2020-07-08 11:22:43 +01:00
Alex Conlin
db79c65334 Remove empty fields from kustomize deployment
Fixes #115
2020-07-07 22:52:53 +01:00
stakater-user
d2223f313f Bump Version to v0.0.60 2020-06-23 07:15:27 +00:00
Ahmed Waleed Malik
c9dabc3a14 Merge pull request #142 from vladlosev/feat-track-secrets-configmaps-by-annotation
Adds support for auto-reloading secrets and configmaps by annotation.
2020-06-23 12:05:22 +05:00
Vlad Losev
e61f9a6bdb Adds note on incompatibility with 'reloader.stakater.com/auto'. 2020-06-21 14:15:58 -07:00
Vlad Losev
6bcec06052 Fixes missing import. 2020-06-09 19:45:12 -07:00
Vlad Losev
0988e8947f Removes unused import. 2020-06-09 19:18:12 -07:00
Vlad Losev
ff27cc0f51 Simplifies test. 2020-06-09 19:17:19 -07:00
Vlad Losev
be7d454504 Fixes test using auto annotations. 2020-06-09 19:16:04 -07:00
Vlad Losev
3131116ed6 Re-adds sleep statements. 2020-06-09 19:16:04 -07:00
Vlad Losev
965cacf1ba Updates documentation. 2020-06-09 19:16:04 -07:00
Vlad Losev
e81b49d81b Renames search annotation. 2020-06-09 19:16:03 -07:00
Vlad Losev
17f8b81110 Simplifies annotations for searching secrets for reload. 2020-06-09 19:16:03 -07:00
Vlad Losev
5980c91560 Abstracts out configmap and deployment creation. 2020-06-09 19:16:01 -07:00
Vlad Losev
fda733ea5a Adds support for auto-reloading secrets and configmaps by annotation. 2020-06-09 19:14:14 -07:00
stakater-user
732cd5b53a Bump Version to v0.0.59 2020-06-09 16:50:55 +00:00
stakater-user
aae0c5c443 Merge pull request #141 from cucxabong/support-projected-volume
Support projected volume
2020-06-09 20:16:24 +05:00
Quan Hoang
d4223311de Support projected volume 2020-06-02 23:36:08 +07:00
stakater-user
29173c7364 Bump Version to v0.0.58 2020-03-30 15:12:34 +00:00
inductor
7767809a38 Refactor Dockerfile (#133)
* refactor dockerfile

* update go version on Dockerfile
2020-03-30 16:52:33 +02:00
stakater-user
f0d6a9e646 Bump Version to v0.0.57 2020-03-18 09:26:04 +00:00
Usama Ahmad
d1538dbeec Merge pull request #131 from stakater/fix-pipeline
Update pipeline library and pipeline tools version
2020-03-18 14:15:55 +05:00
Waleed Malik
8470962383 Update pipeline library and pipeline tools version 2020-03-18 14:02:05 +05:00
Ali Kahoot
139bc1ca38 Merge pull request #128 from katainaka0503/add-prometheus-endpoint
Implement prometheus endpoint
2020-03-17 20:43:03 +05:00
kahootali
6d9f89a452 add service manifest 2020-03-17 15:27:47 +05:00
Ali Kahoot
5ba914d6bb Fix documentation 2020-03-17 15:10:42 +05:00
katainaka0503
9a5094a4ed Add document of prometheus endpoint 2020-03-01 21:56:05 +09:00
katainaka0503
3fe7ad04e9 Implement prometheus endpoint 2020-03-01 21:56:02 +09:00
stakater-user
bf6a247f54 Bump Version to v0.0.56 2020-03-01 12:47:59 +00:00
stakater-user
8203cc3c11 Bump Version to v0.0.55 2020-03-01 12:37:09 +00:00
stakater-user
f287d84b6a Bump Version to v0.0.54 2020-03-01 12:26:27 +00:00
katainaka
205d36512c Remove redundant namePrefix from kustomization.yaml sample (#126) 2020-03-01 13:16:35 +01:00
stakater-user
0b39353c12 Bump Version to v0.0.53 2020-02-28 12:53:28 +00:00
Ali Kahoot
97a5616e60 Merge pull request #125 from stakater/update-library-version
update library version and toolsImage
2020-02-28 17:43:19 +05:00
usamaahmadkhan
b274ac0947 update library version and toolsImage 2020-02-28 17:28:26 +05:00
stakater-user
ed29d1d18c Bump Version to v0.0.52 2020-02-14 06:45:57 +00:00
aure-olli
2384d65953 feat: allow annotations in the pod template too (#123)
Several public charts only allow to edit the annotations of the pod template, not the deployment. Annotations will also be checked in the pod template if not present in the deployment.

fix: #122

Signed-off-by: Aurélien Lambert <aure@olli-ai.com>
2020-02-14 07:32:31 +01:00
stakater-user
7b19601423 Bump Version to v0.0.51 2020-01-13 13:33:00 +00:00
Usama Ahmad
76bf43cb13 Merge pull request #121 from elblivion/json-logging
JSON logging
2020-01-13 18:22:46 +05:00
kahootali
1b7bb3bead add support for logFormat argument in deployment 2020-01-13 17:10:08 +05:00
kahootali
c844f12f73 add logFormat default value 2020-01-13 17:09:53 +05:00
kahootali
5ac2164a1c add logFormat parameter in Readme 2020-01-13 17:09:25 +05:00
Anthony Stanton
c9b89c37c1 Update README 2020-01-03 13:44:01 +01:00
Anthony Stanton
55bc4c3e22 JSON logging 2020-01-02 22:10:41 +01:00
stakater-user
77c7d63296 Bump Version to v0.0.50 2019-12-31 05:49:32 +00:00
Irtiza Ali
2ae4753efb Merge pull request #119 from rjshrjndrn/patch-1
Include annotations in templating.
2019-12-31 10:39:13 +05:00
Rajesh Rajendran
68d0349793 Include annotations in templating.
for example, If I have istio enabled in a namespace, and I want to deploy reloader in that, It doesn't make sense to include envoy there. So there should be an option to add `sidecar.istio.io/inject: "false"` annotation.
2019-12-31 10:52:19 +05:30
46 changed files with 1516 additions and 208 deletions

View File

@@ -1 +1 @@
version: v0.0.49
version: v0.0.73

3
.stignore Normal file
View File

@@ -0,0 +1,3 @@
.git
Reloader
__debug_bin

3
Jenkinsfile vendored
View File

@@ -1,7 +1,8 @@
#!/usr/bin/groovy
@Library('github.com/stakater/stakater-pipeline-library@v2.16.10') _
@Library('github.com/stakater/stakater-pipeline-library@v2.16.24') _
goBuildViaGoReleaser {
publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
toolsImage = 'stakater/pipeline-tools:v2.0.18'
}

View File

@@ -22,6 +22,7 @@ Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades o
## Compatibility
Reloader is compatible with kubernetes >= 1.9
The `apiVersion: rbac.authorization.k8s.io/v1beta1` is deprecated since kubernetes = 1.17. To run it with older versions, please use the chart parameter `reloader.legacy.rbac=true`
## How to use Reloader
@@ -38,6 +39,41 @@ spec:
This will discover deployments/daemonsets/statefulset automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
You can restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deployment/daemonset/statefulset like this:
```yaml
kind: Deployment
metadata:
annotations:
reloader.stakater.com/search: "true"
spec:
template:
```
and Reloader will trigger the rolling upgrade upon modification of any
`ConfigMap` or `Secret` annotated like this:
```yaml
kind: ConfigMap
metadata:
annotations:
reloader.stakater.com/match: "true"
data:
key: value
```
provided the secret/configmap is being used in an environment variable or a
volume mount.
Please note that `reloader.stakater.com/search` and
`reloader.stakater.com/auto` do not work together. If you have the
`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
will always restart upon a change in configmaps or secrets it uses, regardless
of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
@@ -99,10 +135,13 @@ spec:
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the search annotation with the `--auto-search-annotation` flag
and the match annotation with the `--search-match-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
## Deploying to Kubernetes
@@ -145,8 +184,6 @@ You can write your own `kustomization.yaml` using ours as a 'base' and write pat
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: reloader-
bases:
- https://github.com/stakater/Reloader/deployments/kubernetes
@@ -155,20 +192,26 @@ namespace: reloader
### Helm Charts
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3
```bash
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update
helm install stakater/reloader
helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
```
**Note:** The latest version of reloader is using `apiVersion: rbac.authorization.k8s.io/v1` for rbac. The `apiVersion: rbac.authorization.k8s.io/v1beta1` is deprecated since kubernetes = 1.17. To run it with older versions, please use below command.
```bash
helm install stakater/reloader --set reloader.legacy.rbac=true # For helm3 add --generate-name flag or set the release name
```
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.
```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
```
Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:
@@ -180,6 +223,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
## Help
### Documentation
@@ -194,8 +239,8 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us a
Join and talk to us on Slack for discussing Reloader
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://stakater-slack.herokuapp.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater.slack.com/messages/CC5S05S12)
[![Join Slack](https://stakater.github.io/README/stakater-join-slack-btn.png)](https://slack.stakater.com/)
[![Chat](https://stakater.github.io/README/stakater-chat-btn.png)](https://stakater-community.slack.com/messages/CC5S05S12)
## Contributing
@@ -205,6 +250,11 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
### Developing
1. Deploy Reloader.
2. Run `okteto up` to activate your development container.
3. `make build`.
4. `./Reloader`
PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
1. **Fork** the repo on GitHub

View File

@@ -1,18 +1,21 @@
FROM golang:1.13.1-alpine
MAINTAINER "Stakater Team"
RUN apk update
FROM golang:1.13.9-alpine
LABEL maintainer "Stakater Team"
RUN apk -v --update \
add git build-base && \
rm -rf /var/cache/apk/* && \
mkdir -p "$GOPATH/src/github.com/stakater/Reloader"
--no-cache \
add git build-base
ADD . "$GOPATH/src/github.com/stakater/Reloader"
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
go mod download && \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=amd64
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /

View File

@@ -1,8 +1,11 @@
FROM alpine:3.9
MAINTAINER "Stakater Team"
FROM alpine:3.11
LABEL maintainer "Stakater Team"
RUN apk add --update ca-certificates
RUN apk add --update --no-cache ca-certificates
COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
ENTRYPOINT ["/bin/Reloader"]

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.49
appVersion: v0.0.49
version: v0.0.73
appVersion: v0.0.73
keywords:
- Reloader
- kubernetes

View File

@@ -12,15 +12,20 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "reloader-fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}
{{/*
@@ -33,3 +38,11 @@ Create the name of the service account to use
{{ default "default" .Values.reloader.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the annotations to support helm3
*/}}
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}

View File

@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: ClusterRole
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}

View File

@@ -1,7 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: ClusterRoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}

View File

@@ -1,8 +1,9 @@
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.reloader.deployment.annotations }}
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
{{- end }}
labels:
@@ -26,6 +27,10 @@ spec:
{{- end }}
template:
metadata:
{{- if .Values.reloader.deployment.pod.annotations }}
annotations:
{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 8 }}
{{- if .Values.reloader.deployment.labels }}
@@ -48,7 +53,11 @@ spec:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
containers:
- env:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
- name: {{ $name | quote }}
@@ -79,21 +88,26 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
{{- end }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
{{- end }}
{{- if .Values.reloader.ignoreSecrets }}
- "--resources-to-ignore=secrets"
{{- end }}
{{- if eq .Values.reloader.ignoreConfigMaps true }}
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
@@ -109,7 +123,7 @@ spec:
- "{{ .Values.reloader.custom_annotations.auto }}"
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}

View File

@@ -1,7 +1,13 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: Role
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}

View File

@@ -1,7 +1,13 @@
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
{{- if and .Values.reloader.legacy.rbac }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1
{{- end }}
kind: RoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.reloader.service }}
apiVersion: v1
kind: Service
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.service.labels }}
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
ports:
{{ toYaml .Values.reloader.service.ports | indent 4 }}
{{- end }}

View File

@@ -5,6 +5,8 @@ kind: ServiceAccount
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
{{- end }}
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.labels }}

View File

@@ -12,9 +12,13 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -31,6 +35,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -43,10 +51,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.49
version: v0.0.73
image:
name: stakater/reloader
tag: "v0.0.49"
tag: "v0.0.73"
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -67,6 +75,17 @@ reloader:
# cpu: "10m"
# memory: "128Mi"
resources: {}
pod:
annotations: {}
service: {}
# labels: {}
# annotations: {}
# ports:
# - port: 9090
# name: http
# protocol: TCP
# targetPort: 9090
rbac:
enabled: true

View File

@@ -1,14 +1,18 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
namespace: default
rules:

View File

@@ -1,14 +1,18 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
namespace: default
roleRef:

View File

@@ -3,14 +3,18 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.49
version: v0.0.73
name: reloader-reloader
spec:
@@ -24,19 +28,22 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.49
version: v0.0.73
spec:
containers:
- env:
image: "stakater/reloader:v0.0.49"
- image: "stakater/reloader:v0.0.73"
imagePullPolicy: IfNotPresent
name: reloader-reloader
args:
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader

View File

@@ -0,0 +1,3 @@
---
# Source: reloader/templates/service.yaml

View File

@@ -4,10 +4,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader

View File

@@ -1,60 +1,18 @@
---
# Source: reloader/templates/role.yaml
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
release: "reloader"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.49
name: reloader-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
release: "reloader"
heritage: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.49
spec:
containers:
- env:
image: "stakater/reloader:v0.0.49"
imagePullPolicy: IfNotPresent
name: reloader-reloader
args:
serviceAccountName: reloader-reloader
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
namespace: default
rules:
@@ -89,21 +47,21 @@ rules:
- update
- patch
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
namespace: default
roleRef:
@@ -115,16 +73,80 @@ subjects:
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
name: reloader-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
group: com.stakater.platform
provider: stakater
version: v0.0.73
spec:
containers:
- image: "stakater/reloader:v0.0.73"
imagePullPolicy: IfNotPresent
name: reloader-reloader
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader
---
# Source: reloader/templates/role.yaml
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/service.yaml
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.49"
chart: "reloader-v0.0.73"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader

View File

@@ -12,9 +12,13 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
deployment:
nodeSelector:
@@ -31,6 +35,10 @@ reloader:
# operator: "Exists"
affinity: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
@@ -67,6 +75,17 @@ reloader:
# cpu: "10m"
# memory: "128Mi"
resources: {}
pod:
annotations: {}
service: {}
# labels: {}
# annotations: {}
# ports:
# - port: 9090
# name: http
# protocol: TCP
# targetPort: 9090
rbac:
enabled: true

62
docs/Helm2-to-Helm3.md Normal file
View File

@@ -0,0 +1,62 @@
# Helm2 to Helm3 Migration
Follow the instructions below to migrate Reloader from Helm2 to Helm3
## Instructions:
There are 3 steps involved in migrating the reloader from Helm2 to Helm3.
### Step 1:
Install the helm-2to3 plugin
```bash
helm3 plugin install https://github.com/helm/helm-2to3
helm3 2to3 convert <release-name>
helm3 2to3 cleanup --release-cleanup --skip-confirmation
```
### Step 2:
Add the following Helm3 labels and annotations to Reloader resources.
Label:
```yaml
app.kubernetes.io/managed-by=Helm
```
Annotations:
```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
```
For example, to label and annotate the ClusterRoleBinding and ClusterRole:
```bash
KIND=ClusterRoleBinding
NAME=reloader-reloader-role-binding
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
KIND=ClusterRole
NAME=reloader-reloader-role
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```
### Step 3:
Upgrade to desired version
```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts
helm3 repo update
helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```

View File

@@ -8,5 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |

View File

@@ -8,4 +8,4 @@ Below are the steps to use reloader with Sealed Secrets.
8. Install Reloader.
9. Once everything is setup, update the original secret at client and encrypt it with kubeseal to see reloader working.
10. Apply the updated sealed secret.
11. Reloader will resatart the pod to use that updated secret.
11. Reloader will restart the pod to use that updated secret.

View File

@@ -1,6 +1,6 @@
# Verify Reloader's Working
Reloader's working can be verified by two ways.
Reloader's working can be verified by three ways.
## Verify from logs
@@ -49,3 +49,13 @@ After a change in `secret` or `configmap`. Run the below mentioned command and v
```bash
kubectl get pods <pod name> -n <namespace name>
```
## Verify from metrics
Some metrics are exported to prometheus endpoint `/metrics` on port `9090`.
When reloader is unable to reload, `reloader_reload_executed_total{success="false"}` metric gets incremented and when it reloads successfully, `reloader_reload_executed_total{success="true"}` gets incremented. You will be able to see the following metrics, with some other metrics, at `/metrics` endpoint.
```
reloader_reload_executed_total{success="false"} 15
reloader_reload_executed_total{success="true"} 12
```

View File

@@ -2,6 +2,6 @@
These are the key features of Reloader:
1. Restart pod in a depoloyment on change in linked/related configmap's or secret's
1. Restart pod in a deployment on change in linked/related configmap's or secret's
2. Restart pod in a daemonset on change in linked/related configmap's or secret's
3. Restart pod in a statefulset on change in linked/related configmap's or secret's

5
go.mod
View File

@@ -9,11 +9,10 @@ require (
github.com/onsi/gomega v1.7.0 // indirect
github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
github.com/sirupsen/logrus v1.0.5
github.com/prometheus/client_golang v1.4.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
github.com/stretchr/testify v1.4.0 // indirect
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8

66
go.sum
View File

@@ -12,6 +12,16 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -27,11 +37,17 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -46,10 +62,15 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
@@ -74,17 +95,26 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -94,6 +124,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -110,11 +141,31 @@ github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 h1:iOFI/ua5QKK
github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7 h1:vX79RLf8hJqzYiv0FVgbIqSD76lK4dTIoIN29Kc4MXo=
github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
@@ -122,11 +173,14 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
@@ -140,10 +194,12 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -154,15 +210,21 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -177,6 +239,7 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -188,6 +251,7 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
@@ -205,6 +269,8 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20191004120104-195af9ec3521 h1:StP5An9aFEWfPckudvHEJc4q/WUDCUVGoZJrE/efGME=

View File

@@ -26,9 +26,17 @@ type VolumesFunc func(interface{}) []v1.Volume
//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
ItemsFunc ItemsFunc
AnnotationsFunc AnnotationsFunc
PodAnnotationsFunc PodAnnotationsFunc
ContainersFunc ContainersFunc
InitContainersFunc InitContainersFunc
UpdateFunc UpdateFunc
@@ -72,18 +80,58 @@ func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interfac
return util.InterfaceSlice(deploymentConfigs.Items)
}
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).ObjectMeta.Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).ObjectMeta.Annotations
}
// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
return item.(appsv1.Deployment).Spec.Template.Spec.Containers
}
// GetDaemonSetContainers returns the containers of given daemonset
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
}
// GetStatefulsetContainers returns the containers of given statefulSet
func GetStatefulsetContainers(item interface{}) []v1.Container {
// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
}
@@ -97,13 +145,13 @@ func GetDeploymentInitContainers(item interface{}) []v1.Container {
return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonset
// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}
// GetStatefulsetInitContainers returns the containers of given statefulSet
func GetStatefulsetInitContainers(item interface{}) []v1.Container {
// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
@@ -126,8 +174,8 @@ func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{
return err
}
// UpdateStatefulset performs rolling upgrade on statefulSet
func UpdateStatefulset(clients kube.Clients, namespace string, resource interface{}) error {
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
return err
@@ -145,13 +193,13 @@ func GetDeploymentVolumes(item interface{}) []v1.Volume {
return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
}
// GetDaemonSetVolumes returns the Volumes of given daemonset
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
}
// GetStatefulsetVolumes returns the Volumes of given statefulSet
func GetStatefulsetVolumes(item interface{}) []v1.Volume {
// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -23,15 +24,36 @@ func NewReloaderCommand() *cobra.Command {
}
// options
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmapts to match the search")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
return cmd
}
func configureLogging(logFormat string) error {
switch logFormat {
case "json":
logrus.SetFormatter(&logrus.JSONFormatter{})
default:
// just let the library use default on empty string.
if logFormat != "" {
return fmt.Errorf("unsupported logging formatter: %q", logFormat)
}
}
return nil
}
func startReloader(cmd *cobra.Command, args []string) {
err := configureLogging(options.LogFormat)
if err != nil {
logrus.Warn(err)
}
logrus.Info("Starting Reloader")
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
if len(currentNamespace) == 0 {
@@ -55,12 +77,14 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
collectors := metrics.SetupPrometheusEndpoint()
for k := range kube.ResourceMap {
if ignoredResourcesList.Contains(k) {
continue
}
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList)
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/handler"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"k8s.io/apimachinery/pkg/fields"
@@ -26,11 +27,12 @@ type Controller struct {
informer cache.Controller
namespace string
ignoredNamespaces util.List
collectors metrics.Collectors
}
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string) (*Controller, error) {
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
c := Controller{
client: client,
@@ -49,6 +51,7 @@ func NewController(
c.indexer = indexer
c.informer = informer
c.queue = queue
c.collectors = collectors
return &c, nil
}
@@ -56,7 +59,8 @@ func NewController(
func (c *Controller) Add(obj interface{}) {
if !c.resourceInIgnoredNamespace(obj) {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Resource: obj,
Collectors: c.collectors,
})
}
}
@@ -77,6 +81,7 @@ func (c *Controller) Update(old interface{}, new interface{}) {
c.queue.Add(handler.ResourceUpdatedHandler{
Resource: new,
OldResource: old,
Collectors: c.collectors,
})
}
}

View File

@@ -1,6 +1,7 @@
package controller
import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"os"
"testing"
"time"
@@ -25,6 +26,7 @@ var (
data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
collectors = metrics.NewCollectors()
)
func TestMain(m *testing.M) {
@@ -33,7 +35,7 @@ func TestMain(m *testing.M) {
logrus.Infof("Creating controller")
for k := range kube.ResourceMap {
c, err := NewController(clients.KubernetesClient, k, namespace, []string{})
c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
if err != nil {
logrus.Fatalf("%s", err)
}
@@ -334,7 +336,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
}
// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap
func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating configmap
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -550,7 +552,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -818,7 +820,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
}
// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) {
// Creating secret
secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)

View File

@@ -2,13 +2,15 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
)
// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
Resource interface{}
Resource interface{}
Collectors metrics.Collectors
}
// Handle processes the newly created resource
@@ -18,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
} else {
config, _ := r.GetConfig()
// process resource based on its type
doRollingUpgrade(config)
doRollingUpgrade(config, r.Collectors)
}
return nil
}

View File

@@ -2,6 +2,7 @@ package handler
import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
)
@@ -10,6 +11,7 @@ import (
type ResourceUpdatedHandler struct {
Resource interface{}
OldResource interface{}
Collectors metrics.Collectors
}
// Handle processes the updated resource
@@ -20,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
// process resource based on its type
doRollingUpgrade(config)
doRollingUpgrade(config, r.Collectors)
}
}
return nil
@@ -31,7 +33,7 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
var oldSHAData string
var config util.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)

View File

@@ -4,9 +4,11 @@ import (
"strconv"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -17,6 +19,8 @@ import (
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentItems,
AnnotationsFunc: callbacks.GetDeploymentAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
ContainersFunc: callbacks.GetDeploymentContainers,
InitContainersFunc: callbacks.GetDeploymentInitContainers,
UpdateFunc: callbacks.UpdateDeployment,
@@ -29,6 +33,8 @@ func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDaemonSetItems,
AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
ContainersFunc: callbacks.GetDaemonSetContainers,
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
@@ -41,10 +47,12 @@ func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetStatefulSetItems,
ContainersFunc: callbacks.GetStatefulsetContainers,
InitContainersFunc: callbacks.GetStatefulsetInitContainers,
UpdateFunc: callbacks.UpdateStatefulset,
VolumesFunc: callbacks.GetStatefulsetVolumes,
AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
ContainersFunc: callbacks.GetStatefulSetContainers,
InitContainersFunc: callbacks.GetStatefulSetInitContainers,
UpdateFunc: callbacks.UpdateStatefulSet,
VolumesFunc: callbacks.GetStatefulSetVolumes,
ResourceType: "StatefulSet",
}
}
@@ -53,6 +61,8 @@ func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentConfigItems,
AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
ContainersFunc: callbacks.GetDeploymentConfigContainers,
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
UpdateFunc: callbacks.UpdateDeploymentConfig,
@@ -61,34 +71,42 @@ func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func doRollingUpgrade(config util.Config) {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
clients := kube.GetClients()
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
if kube.IsOpenshift {
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs())
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
}
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
err := PerformRollingUpgrade(clients, config, upgradeFuncs)
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
var err error
for _, i := range items {
// find correct annotation and update the resource
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
annotations := upgradeFuncs.AnnotationsFunc(i)
annotationValue, found := annotations[config.Annotation]
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
if !found && !foundAuto && !foundSearchAnn {
annotations = upgradeFuncs.PodAnnotationsFunc(i)
annotationValue = annotations[config.Annotation]
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
}
result := constants.NotUpdated
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
if err == nil && reloaderEnabled {
@@ -98,6 +116,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
result = updateContainers(upgradeFuncs, i, config, false)
if result == constants.Updated {
@@ -107,14 +126,23 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
}
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
result = updateContainers(upgradeFuncs, i, config, true)
}
}
if result == constants.Updated {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
resourceName := util.ToObjectMeta(i).Name
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
}
}
}
@@ -123,12 +151,33 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
for i := range volumes {
if mountType == constants.ConfigmapEnvVarPostfix && volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
} else if mountType == constants.SecretEnvVarPostfix && volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
if mountType == constants.ConfigmapEnvVarPostfix {
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName {
return volumes[i].Name
}
}
}
} else if mountType == constants.SecretEnvVarPostfix {
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
}
if volumes[i].Projected != nil {
for j := range volumes[i].Projected.Sources {
if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName {
return volumes[i].Name
}
}
}
}
}
return ""
}
@@ -212,7 +261,7 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
if container == nil {
@@ -220,12 +269,12 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
}
//update if env var exists
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
// if no existing env var exists lets create one
if result == constants.NoEnvVarFound {
e := v1.EnvVar{
Name: envar,
Name: envVar,
Value: config.SHAValue,
}
container.Env = append(container.Env, e)
@@ -234,11 +283,11 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
return result
}
func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
if envs[j].Value != shaData {
envs[j].Value = shaData
return constants.Updated

View File

@@ -1,32 +1,45 @@
package handler
import (
"fmt"
"os"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testclient "k8s.io/client-go/kubernetes/fake"
)
var (
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
namespace = "test-handler-" + testutil.RandSeq(5)
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
secretName = "testsecret-handler-" + testutil.RandSeq(5)
projectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
projectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
projectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
projectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
configmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
configmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
configmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
@@ -60,6 +73,30 @@ func setup() {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap will be used in projected volume
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret will be used in projected volume
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretName, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
// Creating configmap will be used in projected volume in init containers
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret will be used in projected volume in init containers
_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
@@ -104,6 +141,11 @@ func setup() {
logrus.Errorf("Error in secret creation: %v", err)
}
_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
@@ -116,6 +158,30 @@ func setup() {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with configmap in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedConfigMapWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating Deployment with secret in projected volume
_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret in projected volume mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedSecretWithInitContainer, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret creation: %v", err)
}
// Creating Deployment with secret mounted in init container
_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
if err != nil {
@@ -164,6 +230,17 @@ func setup() {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating Deployment with envFrom source as secret
_, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
clients.KubernetesClient,
configmapAnnotated,
namespace,
map[string]string{"reloader.stakater.com/search": "true"},
)
if err != nil {
logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
}
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
if err != nil {
@@ -176,6 +253,18 @@ func setup() {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with configmap in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
}
// Creating DaemonSet with secret in projected volume
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
}
// Creating DaemonSet with env var source as configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -200,6 +289,18 @@ func setup() {
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
}
// Creating StatefulSet with configmap in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with secret in projected volume
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedSecretName, namespace, true)
if err != nil {
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
}
// Creating StatefulSet with env var source as configmap
_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
if err != nil {
@@ -212,6 +313,17 @@ func setup() {
logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err)
}
// Creating Deployment with pod annotations
_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithPodAnnotations, namespace, false)
if err != nil {
logrus.Errorf("Error in Deployment with pod annotations: %v", err)
}
// Creating Deployment with both annotations
_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithBothAnnotations, namespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with both annotations: %v", err)
}
}
func teardown() {
@@ -227,6 +339,30 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretName)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with secret in projected volume mounted in init container
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Deployment with configmap as env var source
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
if deploymentError != nil {
@@ -275,6 +411,24 @@ func teardown() {
logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
}
// Deleting Deployment with pod annotations
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError)
}
// Deleting Deployment with both annotations
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithBothAnnotations)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError)
}
// Deleting Deployment with search annotation
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapAnnotated)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError)
}
// Deleting DaemonSet with configmap
daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
if daemonSetError != nil {
@@ -287,6 +441,18 @@ func teardown() {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting DaemonSet with configmap in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
}
// Deleting Deployment with secret in projected volume
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedSecretName)
if daemonSetError != nil {
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
}
// Deleting Deployment with configmap as env var source
daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if daemonSetError != nil {
@@ -311,6 +477,18 @@ func teardown() {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedConfigMapName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
}
// Deleting Deployment with secret in projected volume
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedSecretName)
if statefulSetError != nil {
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
}
// Deleting StatefulSet with configmap as env var source
statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
if statefulSetError != nil {
@@ -335,6 +513,30 @@ func teardown() {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Secret used in projected volume
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretName)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting configmap used in projected volume in init containers
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Configmap used projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
}
// Deleting Configmap used as env var source
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
if err != nil {
@@ -383,6 +585,11 @@ func teardown() {
logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err)
}
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
if err != nil {
logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err)
}
// Deleting namespace
testutil.DeleteNamespace(namespace, clients.KubernetesClient)
@@ -398,12 +605,20 @@ func getConfigWithAnnotations(resourceType string, name string, shaData string,
}
}
// getCollectors builds a fresh, unregistered metric set so every test
// observes its counter values in isolation.
func getCollectors() metrics.Collectors {
	collectors := metrics.NewCollectors()
	return collectors
}
// Label sets used to query the Reloaded counter vec in the assertions below.
var labelSucceeded = prometheus.Labels{"success": "true"}
var labelFailed = prometheus.Labels{"success": "false"}
func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -414,14 +629,128 @@ func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume verifies that
// changing a configmap mounted through a projected volume triggers a rolling
// upgrade of the referencing Deployment and bumps the success counter once.
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	// Give the rollout time to propagate before verifying — every sibling
	// projected-volume test in this file waits here; this one was missing it.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
// createConfigMap creates a configmap named name in namespace, carrying the
// given annotations and a fixed test payload, and returns the created object.
func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
	cm := testutil.GetConfigmap(namespace, name, "www.google.com")
	cm.Annotations = annotations
	created, err := clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(cm)
	return created, err
}
// TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation verifies
// that a deployment carrying the search annotation is reloaded when the
// matched (match="true") configmap changes.
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
	sha := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, sha, "")
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"}
	upgradeFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	if err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors); err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}
	logrus.Infof("Verifying deployment update")
	if !testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, upgradeFuncs) {
		t.Errorf("Deployment was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
// TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggers
// verifies that the search annotation does NOT cause a reload when the
// configmap is tagged with match="false".
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggers(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
	// match=false: the resource should be ignored by the search/match logic.
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	// Sleep after the verification pass to let any unexpected rollout surface
	// before asserting nothing changed; the placement is deliberate.
	time.Sleep(5 * time.Second)
	if updated {
		t.Errorf("Deployment was updated unexpectedly")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
		t.Errorf("Counter was increased unexpectedly")
	}
}
// TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped
// creates a deployment that carries the search annotation but does not map to
// the annotated configmap, and verifies no reload happens when that
// (match="false") configmap changes.
func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t *testing.T) {
	deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
		clients.KubernetesClient,
		configmapAnnotated+"-different",
		namespace,
		map[string]string{"reloader.stakater.com/search": "true"},
	)
	if err != nil {
		// Fatalf (not Errorf): the deferred Delete below dereferences
		// deployment, which is nil when creation failed.
		t.Fatalf("Failed to create deployment with search annotation.")
	}
	defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if updated {
		t.Errorf("Deployment was updated unexpectedly")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
		t.Errorf("Counter was increased unexpectedly")
	}
}
func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -432,14 +761,42 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainer
// verifies that changing a configmap mounted via a projected volume in an
// init container rolls the Deployment and bumps the success counter once.
func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapWithInitContainer, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -450,14 +807,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitEnv, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -468,14 +830,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *test
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvFromName, "www.stakater.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -486,14 +853,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -504,14 +876,42 @@ func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDeploymentWithSecretInProjectedVolume verifies that
// changing a secret mounted through a projected volume rolls the Deployment
// and bumps the success counter once.
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -522,14 +922,42 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainer
// verifies that changing a secret mounted via a projected volume in an init
// container rolls the Deployment and bumps the success counter once.
func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
	}
	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -540,14 +968,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -558,14 +991,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitEnv, shaData, options.ReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, deploymentFuncs)
err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -576,14 +1014,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing
if !updated {
t.Errorf("Deployment was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
@@ -594,14 +1037,42 @@ func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolume verifies that
// changing a configmap mounted through a projected volume rolls the DaemonSet
// and bumps the success counter once.
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.facebook.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
	}
	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
@@ -612,14 +1083,19 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
@@ -630,14 +1106,42 @@ func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
if !updated {
t.Errorf("DaemonSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForDaemonSetWithSecretInProjectedVolume verifies that
// changing a secret mounted through a projected volume rolls the DaemonSet
// and bumps the success counter once.
func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
	}
	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
@@ -648,14 +1152,42 @@ func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolume verifies that
// changing a configmap mounted through a projected volume rolls the
// StatefulSet and bumps the success counter once.
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.twitter.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
	}
	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
@@ -666,4 +1198,94 @@ func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
if !updated {
t.Errorf("StatefulSet was not updated")
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
}
// TestRollingUpgradeForStatefulSetWithSecretInProjectedVolume verifies that
// changing a secret mounted through a projected volume rolls the StatefulSet
// and bumps the success counter once.
func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LnR3aXR0ZXIuY29t")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	// Allow the rollout to propagate before verification.
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
	}
	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
// TestRollingUpgradeForDeploymentWithPodAnnotations verifies that a configmap
// change reloads the deployment annotated only on its pod template, while the
// deployment annotated in both places is left untouched.
func TestRollingUpgradeForDeploymentWithPodAnnotations(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithPodAnnotations, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()
	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with pod annotations")
	}
	logrus.Infof("Verifying deployment update")
	// The reloader stores the resource SHA in an env var derived from the
	// resource name; compute the env var name expected after the reload.
	envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + constants.ConfigmapEnvVarPostfix
	items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
	var foundPod, foundBoth bool
	for _, i := range items {
		name := util.ToObjectMeta(i).Name
		if name == configmapWithPodAnnotations {
			// Pod-annotated deployment: must carry the new SHA.
			containers := deploymentFuncs.ContainersFunc(i)
			updated := testutil.GetResourceSHA(containers, envName)
			if updated != config.SHAValue {
				t.Errorf("Deployment was not updated")
			}
			foundPod = true
		}
		if name == configmapWithBothAnnotations {
			// Deployment annotated in both places: must NOT have been updated.
			containers := deploymentFuncs.ContainersFunc(i)
			updated := testutil.GetResourceSHA(containers, envName)
			if updated == config.SHAValue {
				t.Errorf("Deployment was updated")
			}
			foundBoth = true
		}
	}
	// Both deployments must exist, otherwise the assertions above were vacuous.
	if !foundPod {
		t.Errorf("Deployment with pod annotations was not found")
	}
	if !foundBoth {
		t.Errorf("Deployment with both annotations was not found")
	}
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}
// TestFailedRollingUpgrade forces the update step to fail and asserts that
// the failure counter is incremented exactly once.
func TestFailedRollingUpgrade(t *testing.T) {
	sha := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "fail.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, sha, options.ConfigmapUpdateOnChangeAnnotation)
	upgradeFuncs := GetDeploymentRollingUpgradeFuncs()
	// Stub the update step so every reload attempt fails.
	upgradeFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ interface{}) error {
		return fmt.Errorf("error")
	}
	collectors := getCollectors()
	// The returned error is intentionally ignored; only the metric matters.
	_ = PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

View File

@@ -0,0 +1,43 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"net/http"
)
// Collectors bundles the Prometheus metrics exposed by Reloader.
type Collectors struct {
	// Reloaded counts executed reloads, partitioned by the "success" label.
	Reloaded *prometheus.CounterVec
}
// NewCollectors constructs the Reloader metric set. The reload counter is
// pre-seeded with both label values so both series exist (at 0) before the
// first reload happens.
func NewCollectors() Collectors {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "reloader",
			Name:      "reload_executed_total",
			Help:      "Counter of reloads executed by Reloader.",
		},
		[]string{"success"},
	)
	// Initialize both series to zero so scrapes see them immediately.
	for _, outcome := range []string{"true", "false"} {
		counter.With(prometheus.Labels{"success": outcome}).Add(0)
	}
	return Collectors{Reloaded: counter}
}
// SetupPrometheusEndpoint registers the Reloader collectors with the default
// Prometheus registry and serves them on :9090/metrics from a background
// goroutine. It returns the registered collectors.
func SetupPrometheusEndpoint() Collectors {
	collectors := NewCollectors()
	// MustRegister panics on duplicate registration, so this must run once.
	prometheus.MustRegister(collectors.Reloaded)
	go func() {
		http.Handle("/metrics", promhttp.Handler())
		// Fatal exits the process if the metrics listener cannot start.
		logrus.Fatal(http.ListenAndServe(":9090", nil))
	}()
	return collectors
}

View File

@@ -1,10 +1,20 @@
package options
var (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
	// configmaps specified by name
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
	// secrets specified by name
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets
	// or configmaps referenced by the workload
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
	// AutoSearchAnnotation is an annotation to detect changes in
	// configmaps or secrets tagged with the SearchMatchAnnotation
	AutoSearchAnnotation = "reloader.stakater.com/search"
	// SearchMatchAnnotation is an annotation to tag secrets to be found with
	// AutoSearchAnnotation
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
)

View File

@@ -92,6 +92,38 @@ func getEnvVarSources(name string) []v1.EnvFromSource {
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
Name: "projectedconfigmap",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "projectedsecret",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
@@ -123,6 +155,14 @@ func getVolumeMounts(name string) []v1.VolumeMount {
MountPath: "etc/sec",
Name: "secret",
},
{
MountPath: "etc/projectedconfig",
Name: "projectedconfigmap",
},
{
MountPath: "etc/projectedsec",
Name: "projectedsecret",
},
}
}
@@ -389,6 +429,28 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
}
}
// GetDeploymentWithPodAnnotations provides a deployment for testing whose pod
// template carries the reload annotations (set via getAnnotations). When both
// is false, deployment-level annotations are cleared so only the pod-template
// annotations remain; when both is true, the annotations coming from
// getObjectMeta are kept as well.
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
	replicas := int32(1)
	deployment := &appsv1.Deployment{
		ObjectMeta: getObjectMeta(namespace, deploymentName, false),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
			},
			Replicas: &replicas,
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RollingUpdateDeploymentStrategyType,
			},
			Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
		},
	}
	if !both {
		// Only pod-template annotations should drive the reload in this case.
		deployment.ObjectMeta.Annotations = nil
	}
	deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true)
	return deployment
}
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
@@ -501,11 +563,11 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}
// GetResourceSHA returns the SHA value of given environment variable
func GetResourceSHA(containers []v1.Container, envar string) string {
func GetResourceSHA(containers []v1.Container, envVar string) string {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
return envs[j].Value
}
}
@@ -602,6 +664,29 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
deployment, err := deploymentClient.Create(deploymentObj)
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	created, err := client.AppsV1().Deployments(namespace).Create(GetDeploymentWithPodAnnotations(namespace, deploymentName, both))
	// Give the API server a moment to persist the object before callers act on it.
	time.Sleep(3 * time.Second)
	return created, err
}
// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given
// namespace with given annotations.
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	obj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
	obj.Annotations = annotations
	created, err := client.AppsV1().Deployments(namespace).Create(obj)
	// Give the API server a moment to persist the object before callers act on it.
	time.Sleep(3 * time.Second)
	return created, err
}
// CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet
@@ -727,6 +812,7 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
containers := upgradeFuncs.ContainersFunc(i)
// match statefulsets with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
@@ -735,11 +821,16 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
matches = true
break
}
}
} else if searchAnnotationValue == "true" {
if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
matches = true
}
}
if matches {

View File

@@ -8,31 +8,34 @@ import (
// Config contains rolling upgrade configuration parameters for a single
// configmap or secret resource.
type Config struct {
	Namespace           string            // namespace of the watched resource
	ResourceName        string            // name of the configmap/secret
	ResourceAnnotations map[string]string // annotations on the resource itself (used for search/match)
	Annotation          string            // the reload annotation this resource type is matched against
	SHAValue            string            // SHA digest of the resource contents
	Type                string            // env-var postfix identifying the resource type (configmap/secret)
}
// GetConfigmapConfig provides utility config for configmap
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
	return Config{
		Namespace:           configmap.Namespace,
		ResourceName:        configmap.Name,
		ResourceAnnotations: configmap.Annotations,
		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
		// SHA covers both Data and BinaryData (see GetSHAfromConfigmap).
		SHAValue: GetSHAfromConfigmap(configmap),
		Type:     constants.ConfigmapEnvVarPostfix,
	}
}
// GetSecretConfig provides utility config for secret
func GetSecretConfig(secret *v1.Secret) Config {
	return Config{
		Namespace:           secret.Namespace,
		ResourceName:        secret.Name,
		ResourceAnnotations: secret.Annotations,
		Annotation:          options.SecretUpdateOnChangeAnnotation,
		SHAValue:            GetSHAfromSecret(secret.Data),
		Type:                constants.SecretEnvVarPostfix,
	}
}

View File

@@ -2,10 +2,12 @@ package util
import (
"bytes"
"encoding/base64"
"sort"
"strings"
"github.com/stakater/Reloader/internal/pkg/crypto"
v1 "k8s.io/api/core/v1"
)
// ConvertToEnvVarName converts the given text into a usable env var
@@ -29,11 +31,14 @@ func ConvertToEnvVarName(text string) string {
return buffer.String()
}
func GetSHAfromConfigmap(data map[string]string) string {
func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
values := []string{}
for k, v := range data {
for k, v := range configmap.Data {
values = append(values, k+"="+v)
}
for k, v := range configmap.BinaryData {
values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}

View File

@@ -2,6 +2,8 @@ package util
import (
"testing"
v1 "k8s.io/api/core/v1"
)
func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
t.Errorf("Failed to convert data into environment variable")
}
}
// TestGetHashFromConfigMap verifies that configmaps differing in Data and/or
// BinaryData produce distinct SHA values.
func TestGetHashFromConfigMap(t *testing.T) {
	data := map[*v1.ConfigMap]string{
		{
			Data: map[string]string{"test": "test"},
		}: "Only Data",
		{
			Data:       map[string]string{"test": "test"},
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Both Data and BinaryData",
		{
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Only BinaryData",
	}
	// Test that the hash for each configmap is really unique: record each
	// hash as it is produced and fail on the first collision.
	seen := map[string]string{} // hash -> name of the configmap that produced it
	for cm, cmName := range data {
		hash := GetSHAfromConfigmap(cm)
		if _, duplicate := seen[hash]; duplicate {
			t.Errorf("Found duplicate hashes for %v", cmName)
		}
		seen[hash] = cmName
	}
}

14
okteto.yml Normal file
View File

@@ -0,0 +1,14 @@
# Okteto development-environment manifest: spins up a remote dev container
# for working on this repository.
name: reloader-reloader
# Base dev image; presumably tracks the Go toolchain used to build the project — TODO confirm
image: okteto/golang:1
command: bash
securityContext:
  capabilities:
    add:
      # NOTE(review): SYS_PTRACE looks like it is needed so a debugger can
      # attach to processes inside the container — confirm
      - SYS_PTRACE
# Persist Go module downloads and the build cache across container restarts
volumes:
  - /go/pkg/
  - /root/.cache/go-build/
# Keep the local working tree synchronized with /app inside the container
sync:
  - .:/app
# Forward port 2345 (presumably the delve debugger's listen port — verify)
forward:
  - 2345:2345

View File

@@ -78,7 +78,6 @@ func GetKubernetesClient() (*kubernetes.Clientset, error) {
func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
@@ -95,9 +94,6 @@ func getConfig() (*rest.Config, error) {
return nil, err
}
}
if err != nil {
return nil, err
}
return config, nil
}