Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits: 130 commits
| Author | SHA1 | Date |
|---|---|---|
| | 7c2121b7c9 | |
| | 008f4b0fd2 | |
| | 0bf2fc66cc | |
| | d599340549 | |
| | 83a711885c | |
| | 0ddfe8406d | |
| | 036882b588 | |
| | 63619e360c | |
| | 6d54f9faca | |
| | 18254666a6 | |
| | b0dcb4c5e2 | |
| | cd4a4fb324 | |
| | 7ae3a4259e | |
| | e82926a9f6 | |
| | e462f7ab26 | |
| | da4ffc9432 | |
| | cc65a1c039 | |
| | 8c1a9317ee | |
| | 33d1918d71 | |
| | 5fc34e885e | |
| | acc61d504f | |
| | cf9c0fc685 | |
| | 4822dbae86 | |
| | 16d75d1d47 | |
| | 878bc5c442 | |
| | e7ccc40035 | |
| | 0197dec568 | |
| | 795de2399b | |
| | a69674ba4c | |
| | ff5f28ba00 | |
| | df777332a9 | |
| | daca09e65e | |
| | 50a908a59f | |
| | 121a550da5 | |
| | 9f3c8379a6 | |
| | 9b48d320be | |
| | a8edefcdde | |
| | 1689a9560b | |
| | 4f8377de15 | |
| | 765ddbdf43 | |
| | 181477de05 | |
| | 6d8c0cf6cb | |
| | 2dc7d20a37 | |
| | 9cd5b87dab | |
| | 20d88e0668 | |
| | de77785d4f | |
| | bf6cb73fd7 | |
| | b3d3c3704a | |
| | 58514e8610 | |
| | a26f7fc4ad | |
| | e06394c940 | |
| | 362ea70e26 | |
| | 3e6c4a3f60 | |
| | 8cf105726f | |
| | 361bea4373 | |
| | 59fd71d15f | |
| | 6c6776f2b4 | |
| | 8b824ef26a | |
| | 5fd170a7ca | |
| | 69487f6caf | |
| | 401a94bd36 | |
| | bf12cbec15 | |
| | fdc223a4a6 | |
| | dcbc0e0de0 | |
| | aff377718c | |
| | 112e8ba89d | |
| | c2e6231a46 | |
| | 51b42dc098 | |
| | 43200e127a | |
| | 6db5106f85 | |
| | 703c0ea56e | |
| | 21563abc07 | |
| | ce96eb3810 | |
| | b5c8ee2ab9 | |
| | c27bb3929b | |
| | 67913c9985 | |
| | 9dac1a30b6 | |
| | ac7f9d09cc | |
| | d8ae3c76da | |
| | 3f115618cc | |
| | 06aa382910 | |
| | 3b69599c77 | |
| | 125e7536af | |
| | 66f9b07817 | |
| | 40aa9955cd | |
| | bfff7104aa | |
| | c0acfd0503 | |
| | 379b6c0131 | |
| | 3bf427e985 | |
| | 2b6e5455dc | |
| | 9bc8d6b67d | |
| | 0c340fcb48 | |
| | 889b16718a | |
| | 512cbd8c85 | |
| | 39944497f3 | |
| | 6fb1266637 | |
| | 724cda887e | |
| | d29f3716b2 | |
| | 2131f0ebf3 | |
| | 0f052162a2 | |
| | 02c6de97c8 | |
| | 16bce16f81 | |
| | 33443ccb29 | |
| | 75b00733bf | |
| | d2335f8ffd | |
| | 87f3a32f68 | |
| | 95bd5e497f | |
| | 333957d82a | |
| | 75f67ffa6e | |
| | 0558fc3723 | |
| | b637e33d9d | |
| | 130741480e | |
| | cd19d739ab | |
| | f15ec9b4d4 | |
| | 013fcdc052 | |
| | 78e98c7999 | |
| | 44c6333910 | |
| | 0316bcd938 | |
| | 8c77c230e0 | |
| | bcffb62e3f | |
| | 85c10da63d | |
| | b17ab76a24 | |
| | 23ab5692fa | |
| | 55c93a4dd8 | |
| | 32eb9e7959 | |
| | 09046c45ff | |
| | f346ca84ab | |
| | 5041d1a1cf | |
| | 1ad93145b4 | |
| | 61b398539e | |
.dockerignore (new file, 1 line)

@@ -0,0 +1 @@
vendor
Jenkinsfile (vendored, 8 lines changed)

@@ -1,8 +1,10 @@
#!/usr/bin/groovy
@Library('github.com/stakater/fabric8-pipeline-library@v2.7.2')
@Library('github.com/stakater/fabric8-pipeline-library@v2.10.8')

def dummy

goBuildAndRelease {

}
chartRepositoryURL = 'https://chartmuseum.release.stakater.com'
publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
}
Makefile (2 lines changed)

@@ -20,7 +20,7 @@ LDFLAGS =
default: build test

install:
	"$(GLIDECMD)" install
	"$(GLIDECMD)" install --strip-vendor

build:
	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
README.md (112 lines changed)

@@ -13,50 +13,95 @@

## Problem

We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `Deployment`, `Deamonset` and `Statefulset`
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on the relevant `DeploymentConfig`, `Deployment`, `Daemonset` and `Statefulset`

## Solution

Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `Deployments`, `Deamonsets` and `Statefulsets`.
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` and `Statefulsets`.

## How to use Reloader

### Configmap

For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to your `Deployment`
For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or a `Secret` called `foo-secret` or both. Then add your annotation (by default `reloader.stakater.com/auto`) to the main metadata of your `Deployment`

```yaml
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    metadata:
```

This will automatically discover the deployments/daemonsets/statefulsets where `foo-configmap` or `foo-secret` is used, either via an environment variable or a volume mount, and it will perform a rolling upgrade on the related pods when `foo-configmap` or `foo-secret` are updated.

We can also specify a specific configmap or secret which would trigger a rolling upgrade only upon a change in that specified configmap or secret; this way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use the annotations mentioned [here](#Configmap) or [here](#Secret)

### Configmap

To perform a rolling upgrade when a change happens only in specific configmaps, use the annotation below.

For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to the main metadata of your `Deployment`

```yaml
kind: Deployment
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  template:
    metadata:
```

Use a comma separated list to define multiple configmaps.

```yaml
kind: Deployment
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
  template:
    metadata:
```

### Secret

For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to your `Deployment`
To perform a rolling upgrade when a change happens only in specific secrets, use the annotation below.

For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to the main metadata of your `Deployment`

```yaml
kind: Deployment
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret"
spec:
  template:
    metadata:
```

Use a comma separated list to define multiple secrets.

```yaml
kind: Deployment
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
  template:
    metadata:
```

### NOTES
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
- `reloader.stakater.com/auto: "true"` will only reload the pod if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployments/Daemonsets/Statefulsets`
- The `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of the usage of the configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag

## Deploying to Kubernetes

You can deploy Reloader by the following methods:

@@ -69,7 +114,41 @@ You can apply vanilla manifests by running the following command
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```

By default Reloader gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.

Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container (see the sketch after the table):

| Args | Description |
|---|---|
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |

`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the reloader pods down to `0`.
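As a rough sketch, the flag goes into the container args of the Reloader deployment; the container name and image tag below are illustrative and not taken from this diff:

```yaml
# Illustrative snippet only: shows where --resources-to-ignore would be placed.
spec:
  template:
    spec:
      containers:
        - name: reloader
          image: stakater/reloader:v0.0.39
          args:
            - "--resources-to-ignore=configMaps"
```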
### Vanilla kustomize

You can also apply the vanilla manifests by running the following command

```bash
kubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes
```

Similar to the vanilla manifests, this gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.

### Kustomize

You can write your own `kustomization.yaml` using ours as a 'base' and write patches to tweak the configuration.

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namePrefix: reloader-

bases:
- https://github.com/stakater/Reloader/deployments/kubernetes

namespace: reloader
```

### Helm Charts

@@ -83,6 +162,23 @@ helm repo update
helm install stakater/reloader
```

**Note:** By default reloader watches in all namespaces. To watch in a single namespace, please run the following command. It will install reloader in the `test` namespace, which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in the `test` namespace.

```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
```

Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of the `values.yaml` file (an example follows the table):

| Parameter | Description | Type |
|---|---|---|
| ignoreSecrets | To ignore secrets. Valid values are either `true` or `false` | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid values are either `true` or `false` | boolean |

`Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in helm template compilation.
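For example, a chart install that ignores configmaps might look like this (a sketch reusing the parameters above; the target namespace is illustrative):

```bash
helm install stakater/reloader --set reloader.ignoreConfigMaps=true --namespace test
```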
## Help

### Documentation

@@ -96,7 +192,7 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us a
Join and talk to us on Slack for discussing Reloader

[](https://stakater-slack.herokuapp.com/)
[](https://stakater.slack.com/)
[](https://stakater.slack.com/messages/CC5S05S12)

## Contributing
@@ -1,4 +1,4 @@
FROM stakater/go-glide:1.9.3
FROM stakater/go-glide:1.12.6
MAINTAINER "Stakater Team"

RUN apk update

@@ -11,7 +11,7 @@ RUN apk -v --update \
ADD . "$GOPATH/src/github.com/stakater/Reloader"

RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
    glide update && \
    glide install --strip-vendor && \
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader

COPY build/package/Dockerfile.run /

@@ -1,4 +1,4 @@
FROM alpine:3.4
FROM alpine:3.9
MAINTAINER "Stakater Team"

RUN apk add --update ca-certificates
deployments/kubernetes/chart/reloader/.helmignore (new file, 2 lines)

@@ -0,0 +1,2 @@
# OWNERS file for Kubernetes
OWNERS
@@ -3,11 +3,27 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 0.0.17
version: v0.0.39
appVersion: v0.0.39
keywords:
- Reloader
- kubernetes
home: https://github.com/stakater/Reloader
sources:
- https://github.com/stakater/IngressMonitorController
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater
  email: hello@stakater.com
- name: rasheedamir
  email: rasheed@aurorasolutions.io
- name: waseem-h
  email: waseemhassan@stakater.com
- name: faizanahmad055
  email: faizan.ahmad55@outlook.com
- name: kahootali
  email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
  email: ahmad@aurorasolutions.io
- name: ahsan-storm
  email: ahsanmuhammad1@outlook.com
deployments/kubernetes/chart/reloader/OWNERS (new file, 14 lines)

@@ -0,0 +1,14 @@
approvers:
- faizanahmad055
- kahootali
- ahmadiq
- waseem-h
- rasheedamir
- ahsan-storm
reviewers:
- faizanahmad055
- kahootali
- ahmadiq
- waseem-h
- rasheedamir
- ahsan-storm
@@ -0,0 +1,7 @@
- For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
  configmap.reloader.stakater.com/reload: "foo-configmap"

- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
  secret.reloader.stakater.com/reload: "foo-secret"

- After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
@@ -2,6 +2,7 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
|
||||
{{- define "reloader-name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" | lower -}}
|
||||
{{- end -}}
|
||||
@@ -15,19 +16,20 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "reloader-labels.selector" -}}
|
||||
app: {{ template "reloader-name" . }}
|
||||
group: {{ .Values.reloader.labels.group }}
|
||||
provider: {{ .Values.reloader.labels.provider }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "reloader-labels.stakater" -}}
|
||||
{{ template "reloader-labels.selector" . }}
|
||||
version: {{ .Values.reloader.labels.version }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "reloader-labels.chart" -}}
|
||||
app: {{ template "reloader-fullname" . }}
|
||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
release: {{ .Release.Name | quote }}
|
||||
heritage: {{ .Release.Service | quote }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "reloader-serviceAccountName" -}}
|
||||
{{- if .Values.reloader.serviceAccount.create -}}
|
||||
{{ default (include "reloader-fullname" .) .Values.reloader.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.reloader.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
@@ -0,0 +1,62 @@
|
||||
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
|
||||
- secrets
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
|
||||
- configmaps
|
||||
{{- end }}
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
|
||||
- apiGroups:
|
||||
- "apps.openshift.io"
|
||||
- ""
|
||||
resources:
|
||||
- deploymentconfigs
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
@@ -0,0 +1,23 @@
|
||||
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "reloader-serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -1,31 +1,123 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
{{- if .Values.reloader.deployment.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}
|
||||
{{- if .Values.reloader.deployment.labels }}
|
||||
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
{{ include "reloader-labels.selector" . | indent 6 }}
|
||||
app: {{ template "reloader-fullname" . }}
|
||||
release: {{ .Release.Name | quote }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.selector" . | indent 8 }}
|
||||
{{ include "reloader-labels.chart" . | indent 8 }}
|
||||
{{- if .Values.reloader.deployment.labels }}
|
||||
{{ toYaml .Values.reloader.deployment.labels | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.reloader.deployment.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.deployment.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.deployment.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- env:
|
||||
{{- range $name, $value := .Values.reloader.deployment.env.open }}
|
||||
{{- if not (empty $value) }}
|
||||
- name: {{ $name | quote }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $secret_name := include "reloader-fullname" . }}
|
||||
{{- range $name, $value := .Values.reloader.deployment.env.secret }}
|
||||
{{- if not ( empty $value) }}
|
||||
- name: {{ $name | quote }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ $secret_name }}
|
||||
key: {{ $name | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $name, $value := .Values.reloader.deployment.env.field }}
|
||||
{{- if not ( empty $value) }}
|
||||
- name: {{ $name | quote }}
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: {{ $value | quote}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.reloader.watchGlobally false }}
|
||||
- name: KUBERNETES_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
{{- end }}
|
||||
image: "{{ .Values.reloader.image.name }}:{{ .Values.reloader.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.reloader.image.pullPolicy }}
|
||||
name: {{ template "reloader-name" . }}
|
||||
serviceAccountName: {{ template "reloader-name" . }}
|
||||
|
||||
image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/
|
||||
name: tmp-volume
|
||||
{{- end }}
|
||||
args:
|
||||
|
||||
{{- if .Values.reloader.ignoreSecrets }}
|
||||
- "--resources-to-ignore=secrets"
|
||||
{{- end }}
|
||||
{{- if eq .Values.reloader.ignoreConfigMaps true }}
|
||||
- "--resources-to-ignore=configMaps"
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.reloader.custom_annotations }}
|
||||
{{- if .Values.reloader.custom_annotations.configmap }}
|
||||
- "--configmap-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.configmap }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.secret }}
|
||||
- "--secret-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.secret }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.auto }}
|
||||
- "--auto-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.auto }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.reloader.deployment.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "reloader-serviceAccountName" . }}
|
||||
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp-volume
|
||||
{{- end }}
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}
|
||||
---
|
||||
{{- if .Values.reloader.watchGlobally }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "reloader-name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- else }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.stakater" . | indent 4 }}
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
name: {{ template "reloader-name" . }}-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "reloader-name" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
62
deployments/kubernetes/chart/reloader/templates/role.yaml
Normal file
@@ -0,0 +1,62 @@
|
||||
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
|
||||
- secrets
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
|
||||
- configmaps
|
||||
{{- end }}
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
|
||||
- apiGroups:
|
||||
- "apps.openshift.io"
|
||||
- ""
|
||||
resources:
|
||||
- deploymentconfigs
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
@@ -0,0 +1,23 @@
|
||||
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "reloader-serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,14 @@
|
||||
{{- if .Values.reloader.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.serviceAccount.labels }}
|
||||
{{ toYaml .Values.reloader.serviceAccount.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-serviceAccountName" . }}
|
||||
{{- end }}
|
||||
@@ -4,12 +4,79 @@ kubernetes:
|
||||
host: https://kubernetes.default
|
||||
|
||||
reloader:
|
||||
labels:
|
||||
provider: stakater
|
||||
group: com.stakater.platform
|
||||
version: 0.0.17
|
||||
image:
|
||||
name: stakater/reloader
|
||||
tag: "0.0.17"
|
||||
pullPolicy: IfNotPresent
|
||||
watchGlobally: true
|
||||
isOpenshift: false
|
||||
ignoreSecrets: false
|
||||
ignoreConfigMaps: false
|
||||
watchGlobally: true
|
||||
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
|
||||
readOnlyRootFileSystem: false
|
||||
matchLabels: {}
|
||||
deployment:
|
||||
nodeSelector:
|
||||
# cloud.google.com/gke-nodepool: default-pool
|
||||
|
||||
# An affinity stanza to be applied to the Deployment.
|
||||
# Example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
affinity: {}
|
||||
|
||||
# A list of tolerations to be applied to the Deployment.
|
||||
# Example:
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
tolerations: []
|
||||
|
||||
annotations: {}
|
||||
labels:
|
||||
provider: stakater
|
||||
group: com.stakater.platform
|
||||
version: v0.0.39
|
||||
image:
|
||||
name: stakater/reloader
|
||||
tag: "v0.0.39"
|
||||
pullPolicy: IfNotPresent
|
||||
# Support for extra environment variables.
|
||||
env:
|
||||
# Open supports Key value pair as environment variables.
|
||||
open:
|
||||
# secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
|
||||
secret:
|
||||
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
|
||||
field:
|
||||
|
||||
# Specify resource requests/limits for the deployment.
|
||||
# Example:
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: "100m"
|
||||
# memory: "512Mi"
|
||||
# requests:
|
||||
# cpu: "10m"
|
||||
# memory: "128Mi"
|
||||
resources: {}
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
labels: {}
|
||||
# Service account config for the agent pods
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
labels: {}
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: reloader
|
||||
# Optional flags to pass to the Reloader entrypoint
|
||||
# Example:
|
||||
# custom_annotations:
|
||||
# configmap: "my.company.com/configmap"
|
||||
# secret: "my.company.com/secret"
|
||||
custom_annotations: {}
|
||||
|
||||
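To make the comment above concrete, a hedged example of setting these values (the annotation keys are the illustrative ones from the comment, not defaults):

```yaml
# Example override only: keys taken from the comment above, not built-in defaults.
reloader:
  custom_annotations:
    configmap: "my.company.com/configmap"
    secret: "my.company.com/secret"
```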
deployments/kubernetes/kustomization.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- manifests/clusterrole.yaml
- manifests/clusterrolebinding.yaml
- manifests/role.yaml
- manifests/rolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
45
deployments/kubernetes/manifests/clusterrole.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
---
|
||||
# Source: reloader/templates/clusterrole.yaml
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: RELEASE-NAME-reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
22
deployments/kubernetes/manifests/clusterrolebinding.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
# Source: reloader/templates/clusterrolebinding.yaml
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: RELEASE-NAME-reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: RELEASE-NAME-reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
|
||||
@@ -1,36 +1,42 @@
|
||||
---
|
||||
# Source: reloader/templates/deployment.yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: v0.0.39
|
||||
|
||||
name: RELEASE-NAME-reloader
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
app: RELEASE-NAME-reloader
|
||||
release: "RELEASE-NAME"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: v0.0.39
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
image: "stakater/reloader:0.0.17"
|
||||
image: "stakater/reloader:v0.0.39"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: reloader
|
||||
name: RELEASE-NAME-reloader
|
||||
args:
|
||||
serviceAccountName: reloader
|
||||
|
||||
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
---
|
||||
# Source: reloader/templates/rbac.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
deployments/kubernetes/manifests/role.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
---
# Source: reloader/templates/role.yaml

deployments/kubernetes/manifests/rolebinding.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
---
# Source: reloader/templates/rolebinding.yaml

deployments/kubernetes/manifests/serviceaccount.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
---
# Source: reloader/templates/serviceaccount.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: RELEASE-NAME-reloader
    chart: "reloader-v0.0.39"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader
@@ -1,71 +1,66 @@
|
||||
---
|
||||
# Source: reloader/templates/role.yaml
|
||||
|
||||
|
||||
---
|
||||
# Source: reloader/templates/deployment.yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: v0.0.39
|
||||
|
||||
name: RELEASE-NAME-reloader
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
app: RELEASE-NAME-reloader
|
||||
release: "RELEASE-NAME"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: v0.0.39
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
image: "stakater/reloader:0.0.17"
|
||||
image: "stakater/reloader:v0.0.39"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: reloader
|
||||
name: RELEASE-NAME-reloader
|
||||
args:
|
||||
serviceAccountName: reloader
|
||||
|
||||
---
|
||||
# Source: reloader/templates/rbac.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
|
||||
---
|
||||
# Source: reloader/templates/clusterrole.yaml
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
labels:
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
name: RELEASE-NAME-reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
@@ -73,8 +68,6 @@ rules:
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
@@ -85,25 +78,53 @@ rules:
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
---
|
||||
# Source: reloader/templates/rolebinding.yaml
|
||||
|
||||
|
||||
---
|
||||
# Source: reloader/templates/clusterrolebinding.yaml
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.17
|
||||
chart: "reloader-0.0.17"
|
||||
labels:
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
name: RELEASE-NAME-reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
name: RELEASE-NAME-reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
|
||||
---
|
||||
# Source: reloader/templates/serviceaccount.yaml
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: RELEASE-NAME-reloader
|
||||
chart: "reloader-v0.0.39"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
|
||||
|
||||
@@ -4,10 +4,26 @@ apiVersion: v1
|
||||
name: reloader
|
||||
description: Reloader chart that runs on kubernetes
|
||||
version: {{ getenv "VERSION" }}
|
||||
appVersion: {{ getenv "VERSION" }}
|
||||
keywords:
|
||||
- Reloader
|
||||
- kubernetes
|
||||
home: https://github.com/stakater/Reloader
|
||||
sources:
|
||||
- https://github.com/stakater/IngressMonitorController
|
||||
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
|
||||
maintainers:
|
||||
- name: Stakater
|
||||
email: hello@stakater.com
|
||||
email: hello@stakater.com
|
||||
- name: rasheedamir
|
||||
email: rasheed@aurorasolutions.io
|
||||
- name: waseem-h
|
||||
email: waseemhassan@stakater.com
|
||||
- name: faizanahmad055
|
||||
email: faizan.ahmad55@outlook.com
|
||||
- name: kahootali
|
||||
email: ali.kahoot@aurorasolutions.io
|
||||
- name: ahmadiq
|
||||
email: ahmad@aurorasolutions.io
|
||||
- name: ahsan-storm
|
||||
email: ahsanmuhammad1@outlook.com
|
||||
|
||||
@@ -4,12 +4,79 @@ kubernetes:
|
||||
host: https://kubernetes.default
|
||||
|
||||
reloader:
|
||||
labels:
|
||||
provider: stakater
|
||||
group: com.stakater.platform
|
||||
version: {{ getenv "VERSION" }}
|
||||
image:
|
||||
name: {{ getenv "DOCKER_IMAGE" }}
|
||||
tag: "{{ getenv "VERSION" }}"
|
||||
pullPolicy: IfNotPresent
|
||||
watchGlobally: true
|
||||
isOpenshift: false
|
||||
ignoreSecrets: false
|
||||
ignoreConfigMaps: false
|
||||
watchGlobally: true
|
||||
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
|
||||
readOnlyRootFileSystem: false
|
||||
matchLabels: {}
|
||||
deployment:
|
||||
nodeSelector:
|
||||
# cloud.google.com/gke-nodepool: default-pool
|
||||
|
||||
# An affinity stanza to be applied to the Deployment.
|
||||
# Example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
affinity: {}
|
||||
|
||||
# A list of tolerations to be applied to the Deployment.
|
||||
# Example:
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
tolerations: []
|
||||
|
||||
annotations: {}
|
||||
labels:
|
||||
provider: stakater
|
||||
group: com.stakater.platform
|
||||
version: {{ getenv "VERSION" }}
|
||||
image:
|
||||
name: {{ getenv "DOCKER_IMAGE" }}
|
||||
tag: "{{ getenv "VERSION" }}"
|
||||
pullPolicy: IfNotPresent
|
||||
# Support for extra environment variables.
|
||||
env:
|
||||
# Open supports Key value pair as environment variables.
|
||||
open:
|
||||
# secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
|
||||
secret:
|
||||
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
|
||||
field:
|
||||
|
||||
# Specify resource requests/limits for the deployment.
|
||||
# Example:
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: "100m"
|
||||
# memory: "512Mi"
|
||||
# requests:
|
||||
# cpu: "10m"
|
||||
# memory: "128Mi"
|
||||
resources: {}
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
labels: {}
|
||||
# Service account config for the agent pods
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
labels: {}
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: reloader
|
||||
# Optional flags to pass to the Reloader entrypoint
|
||||
# Example:
|
||||
# custom_annotations:
|
||||
# configmap: "my.company.com/configmap"
|
||||
# secret: "my.company.com/secret"
|
||||
custom_annotations: {}
|
||||
|
||||
@@ -1,6 +1,6 @@
# How it works?

Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Deamonset` and `Statefulset`.
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in the data of these objects. After change detection, Reloader performs a rolling upgrade on the relevant Pods via the associated `Deployment`, `Daemonset` and `Statefulset`.

## How change detection works

@@ -17,23 +17,25 @@ The annotation value is comma separated list of `configmaps` or `secrets`. If a

### Annotation for Configmap

For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation to your `Deployment`
For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation* to your `Deployment`

```yaml
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--configmap-annotation` flag</small>

### Annotation for Secret

For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation to your `Deployment`
For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation* to your `Deployment`

```yaml
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>

The above mentioned annotations also work for `Daemonsets` and `Statefulsets`

@@ -75,3 +77,6 @@ And render manifest file using helm command
helm --namespace {replace this with namespace name} template . > reloader.yaml
```
The output file can then be used to deploy reloader in a specific namespace, for example:
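A usage sketch of applying the rendered file (the namespace placeholder is illustrative):

```bash
kubectl apply -f reloader.yaml --namespace <namespace-name>
```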
## Compatibility with helm install and upgrade
Reloader has no impact on the helm deployment cycle. Reloader only injects an environment variable into the `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of the configmap's or secret's data. So if a deployment is created using Helm and Reloader updates the deployment, then the next time you upgrade the helm release, Reloader will do nothing except change that environment variable value in the `deployment`, `daemonset` or `statefulset`, as illustrated below.
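As an illustration (not an excerpt from the codebase), the injected variable on a workload that uses a configmap named `foo` might look roughly like this; the variable name follows the `STAKATER_FOO_CONFIGMAP` pattern described in these docs, and the SHA1 value shown is made up:

```yaml
# Hypothetical example of the env var Reloader injects; the value is a fake SHA1.
spec:
  template:
    spec:
      containers:
        - name: app
          env:
            - name: STAKATER_FOO_CONFIGMAP
              value: "2356a192b39d3c548c3a4bc950ddeecb752fdd3a"
```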
@@ -8,4 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. This adds difficulties for any additional updates in configmap controller, and one cannot know for sure whether new changes break any old functionality or not. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in a `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 character encoded value that is much less prone to collision. | Configmap controller uses a `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it into a suitable hash value to avoid data pollution in the deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
@@ -25,4 +25,16 @@ Reloader supports deployment rollout as well as daemonsets and statefulsets roll
k8s-trigger-controller stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`

#### Reloader:
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`

### Customization

#### k8s-trigger-controller:
k8s-trigger-controller restricts you to using the `trigger.k8s.io/[secret-configMap]-NAME-last-hash` annotation

#### Reloader:
Reloader allows you to customize the annotation to fit your needs with command line flags (see the sketch after this list):

- `--auto-annotation <annotation>`
- `--configmap-annotation <annotation>`
- `--secret-annotation <annotation>`
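As a hedged illustration of these flags (the key `my.company.com/configmap` is only an example, not a default): start Reloader with `--configmap-annotation my.company.com/configmap`, then use that same key on the workload instead of the default one:

```yaml
# Example annotation key only; it must match whatever was passed to the flag.
kind: Deployment
metadata:
  annotations:
    my.company.com/configmap: "foo-configmap"
```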
docs/Reloader-with-Sealed-Secrets.md (new file, 11 lines)

@@ -0,0 +1,11 @@
Below are the steps to use reloader with Sealed Secrets.
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets).
2. Install the controller for sealed secrets.
3. Fetch the encryption certificate.
4. Encrypt the secret (see the sketch after this list).
5. Apply the secret.
6. Install the tool which uses that sealed secret.
7. Install Reloader.
8. Once everything is set up, update the original secret at the client and encrypt it with kubeseal to see reloader working.
9. Apply the updated sealed secret.
10. Reloader will restart the pod to use that updated secret.
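For steps 3 and 4, the kubeseal client is typically used along the lines of the following sketch; treat the exact invocation as an assumption and check the sealed-secrets docs for your version:

```bash
# Fetch the controller's encryption certificate (assumed invocation)
kubeseal --fetch-cert > cert.pem

# Encrypt a plain Secret manifest into a SealedSecret (assumed invocation)
kubeseal --cert cert.pem --format yaml < foo-secret.yaml > foo-sealedsecret.yaml
```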
106
glide.lock
generated
@@ -1,26 +1,12 @@
|
||||
hash: b6fe060028bdb1249ba2413746476c2550b267eeab3c166c36a86e000a8dd354
|
||||
updated: 2018-07-24T21:12:43.027181463+05:00
|
||||
hash: 0a37eeebda95f7ac050377c5b8ca8a6f7ab051ef66ba1752471090157e6a6ea2
|
||||
updated: 2019-07-03T21:04:13.576837+02:00
|
||||
imports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/emicklei/go-restful
|
||||
version: ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
subpackages:
|
||||
- log
|
||||
- name: github.com/emicklei/go-restful-swagger12
|
||||
version: dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-openapi/jsonpointer
|
||||
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
||||
- name: github.com/go-openapi/jsonreference
|
||||
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
||||
- name: github.com/go-openapi/spec
|
||||
version: 6aced65f8501fe1217321abf0749d354824ba2ff
|
||||
- name: github.com/go-openapi/swag
|
||||
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
- name: github.com/gogo/protobuf
|
||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
||||
subpackages:
|
||||
@@ -29,7 +15,7 @@ imports:
|
||||
- name: github.com/golang/glog
|
||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
- name: github.com/golang/protobuf
|
||||
version: 4bd1920723d7b7c925de087aa32e2187708897f7
|
||||
version: b4deda0973fb4c70b50d226b1af49f3da59f5265
|
||||
subpackages:
|
||||
- proto
|
||||
- ptypes
|
||||
@@ -54,32 +40,34 @@ imports:
|
||||
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||
subpackages:
|
||||
- simplelru
|
||||
- name: github.com/howeyc/gopass
|
||||
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
|
||||
- name: github.com/imdario/mergo
|
||||
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/json-iterator/go
|
||||
version: 36b14963da70d11297d313183d7e6388c8510e1e
|
||||
- name: github.com/juju/ratelimit
|
||||
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
- name: github.com/mailru/easyjson
|
||||
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
|
||||
- name: github.com/modern-go/concurrent
|
||||
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
|
||||
- name: github.com/modern-go/reflect2
|
||||
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
|
||||
- name: github.com/openshift/api
|
||||
version: d5b34b957e91dbf64013a866951c3ed5770db0b5
|
||||
subpackages:
|
||||
- buffer
|
||||
- jlexer
|
||||
- jwriter
|
||||
- apps/v1
|
||||
- name: github.com/openshift/client-go
|
||||
version: 431ec9a26e5021f35fa41ee9a89842db9bfdb370
|
||||
subpackages:
|
||||
- apps/clientset/versioned
|
||||
- apps/clientset/versioned/scheme
|
||||
- apps/clientset/versioned/typed/apps/v1
|
||||
- name: github.com/openshift/library-go
|
||||
version: 0b8367a4679859036c27a30dbe010d76409e7075
|
||||
- name: github.com/peterbourgon/diskv
|
||||
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
- name: github.com/PuerkitoBio/purell
|
||||
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
||||
- name: github.com/PuerkitoBio/urlesc
|
||||
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
|
||||
- name: github.com/sirupsen/logrus
|
||||
version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc
|
||||
- name: github.com/spf13/cobra
|
||||
version: ef82de70bb3f60c65fb8eebacbb2d122ef517385
|
||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
||||
- name: github.com/spf13/pflag
|
||||
version: 583c0c0531f06d5278b7d917446061adc344b5cd
|
||||
- name: golang.org/x/crypto
|
||||
@@ -95,32 +83,31 @@ imports:
|
||||
- idna
|
||||
- lex/httplex
|
||||
- name: golang.org/x/sys
|
||||
version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
|
||||
version: 95c6576299259db960f6c5b9b69ea52422860fce
|
||||
subpackages:
|
||||
- unix
|
||||
- windows
|
||||
- name: golang.org/x/text
|
||||
version: b19bf474d317b857955b12035d2c5acb57ce8b01
|
||||
subpackages:
|
||||
- cases
|
||||
- internal
|
||||
- internal/tag
|
||||
- language
|
||||
- runes
|
||||
- secure/bidirule
|
||||
- secure/precis
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- width
|
||||
- name: golang.org/x/time
|
||||
version: f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||
subpackages:
|
||||
- rate
|
||||
- name: gopkg.in/inf.v0
|
||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
version: 670d4cfef0544295bc27a114dbac37980d83185a
|
||||
- name: k8s.io/api
|
||||
version: fe29995db37613b9c5b2a647544cf627bfa8d299
|
||||
version: 2d6f90ab1293a1fb871cf149423ebb72aa7423aa
|
||||
subpackages:
|
||||
- admissionregistration/v1alpha1
|
||||
- admissionregistration/v1beta1
|
||||
- apps/v1
|
||||
- apps/v1beta1
|
||||
- apps/v1beta2
|
||||
- authentication/v1
|
||||
@@ -134,6 +121,7 @@ imports:
|
||||
- batch/v2alpha1
|
||||
- certificates/v1beta1
|
||||
- core/v1
|
||||
- events/v1beta1
|
||||
- extensions/v1beta1
|
||||
- networking/v1
|
||||
- policy/v1beta1
|
||||
@@ -141,23 +129,23 @@ imports:
|
||||
- rbac/v1alpha1
|
||||
- rbac/v1beta1
|
||||
- scheduling/v1alpha1
|
||||
- scheduling/v1beta1
|
||||
- settings/v1alpha1
|
||||
- storage/v1
|
||||
- storage/v1alpha1
|
||||
- storage/v1beta1
|
||||
- name: k8s.io/apimachinery
|
||||
version: 019ae5ada31de202164b118aee88ee2d14075c31
|
||||
version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae
|
||||
subpackages:
|
||||
- pkg/api/equality
|
||||
- pkg/api/errors
|
||||
- pkg/api/meta
|
||||
- pkg/api/resource
|
||||
- pkg/apis/meta/internalversion
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/apis/meta/v1/unstructured
|
||||
- pkg/apis/meta/v1alpha1
|
||||
- pkg/apis/meta/v1beta1
|
||||
- pkg/conversion
|
||||
- pkg/conversion/queryparams
|
||||
- pkg/conversion/unstructured
|
||||
- pkg/fields
|
||||
- pkg/labels
|
||||
- pkg/runtime
|
||||
@@ -177,18 +165,21 @@ imports:
|
||||
- pkg/util/framer
|
||||
- pkg/util/intstr
|
||||
- pkg/util/json
|
||||
- pkg/util/mergepatch
|
||||
- pkg/util/net
|
||||
- pkg/util/runtime
|
||||
- pkg/util/sets
|
||||
- pkg/util/strategicpatch
|
||||
- pkg/util/validation
|
||||
- pkg/util/validation/field
|
||||
- pkg/util/wait
|
||||
- pkg/util/yaml
|
||||
- pkg/version
|
||||
- pkg/watch
|
||||
- third_party/forked/golang/json
|
||||
- third_party/forked/golang/reflect
|
||||
- name: k8s.io/client-go
|
||||
version: 35874c597fed17ca62cd197e516d7d5ff9a2958c
|
||||
version: 59698c7d9724b0f95f9dc9e7f7dfdcc3dfeceb82
|
||||
subpackages:
|
||||
- discovery
|
||||
- discovery/fake
|
||||
@@ -197,6 +188,10 @@ imports:
|
||||
- kubernetes/scheme
|
||||
- kubernetes/typed/admissionregistration/v1alpha1
|
||||
- kubernetes/typed/admissionregistration/v1alpha1/fake
|
||||
- kubernetes/typed/admissionregistration/v1beta1
|
||||
- kubernetes/typed/admissionregistration/v1beta1/fake
|
||||
- kubernetes/typed/apps/v1
|
||||
- kubernetes/typed/apps/v1/fake
|
||||
- kubernetes/typed/apps/v1beta1
|
||||
- kubernetes/typed/apps/v1beta1/fake
|
||||
- kubernetes/typed/apps/v1beta2
|
||||
@@ -223,6 +218,8 @@ imports:
|
||||
- kubernetes/typed/certificates/v1beta1/fake
|
||||
- kubernetes/typed/core/v1
|
||||
- kubernetes/typed/core/v1/fake
|
||||
- kubernetes/typed/events/v1beta1
|
||||
- kubernetes/typed/events/v1beta1/fake
|
||||
- kubernetes/typed/extensions/v1beta1
|
||||
- kubernetes/typed/extensions/v1beta1/fake
|
||||
- kubernetes/typed/networking/v1
|
||||
@@ -237,13 +234,21 @@ imports:
|
||||
- kubernetes/typed/rbac/v1beta1/fake
|
||||
- kubernetes/typed/scheduling/v1alpha1
|
||||
- kubernetes/typed/scheduling/v1alpha1/fake
|
||||
- kubernetes/typed/scheduling/v1beta1
|
||||
- kubernetes/typed/scheduling/v1beta1/fake
|
||||
- kubernetes/typed/settings/v1alpha1
|
||||
- kubernetes/typed/settings/v1alpha1/fake
|
||||
- kubernetes/typed/storage/v1
|
||||
- kubernetes/typed/storage/v1/fake
|
||||
- kubernetes/typed/storage/v1alpha1
|
||||
- kubernetes/typed/storage/v1alpha1/fake
|
||||
- kubernetes/typed/storage/v1beta1
|
||||
- kubernetes/typed/storage/v1beta1/fake
|
||||
- pkg/apis/clientauthentication
|
||||
- pkg/apis/clientauthentication/v1alpha1
|
||||
- pkg/apis/clientauthentication/v1beta1
|
||||
- pkg/version
|
||||
- plugin/pkg/client/auth/exec
|
||||
- rest
|
||||
- rest/watch
|
||||
- testing
|
||||
@@ -257,13 +262,16 @@ imports:
|
||||
- tools/pager
|
||||
- tools/reference
|
||||
- transport
|
||||
- util/buffer
|
||||
- util/cert
|
||||
- util/connrotation
|
||||
- util/flowcontrol
|
||||
- util/homedir
|
||||
- util/integer
|
||||
- util/retry
|
||||
- util/workqueue
|
||||
- name: k8s.io/kube-openapi
|
||||
version: 868f2f29720b192240e18284659231b440f9cda5
|
||||
version: 91cfa479c814065e420cee7ed227db0f63a5854e
|
||||
subpackages:
|
||||
- pkg/common
|
||||
- pkg/util/proto
|
||||
testImports: []
|
||||
|
||||
18
glide.yaml
@@ -1,14 +1,10 @@
|
||||
package: github.com/stakater/Reloader
|
||||
import:
|
||||
- package: k8s.io/api
|
||||
version: kubernetes-1.8.0
|
||||
- package: k8s.io/apimachinery
|
||||
version: kubernetes-1.8.0
|
||||
- package: k8s.io/client-go
|
||||
version: 5.0.0
|
||||
- package: github.com/openshift/client-go
|
||||
version: release-3.11
|
||||
- package: github.com/spf13/cobra
|
||||
version: 0.0.3
|
||||
- package: github.com/spf13/pflag
|
||||
version: 1.0.1
|
||||
- package: github.com/sirupsen/logrus
|
||||
version: 1.0.5
|
||||
version: f62e98d28ab7ad31d707ba837a966378465c7b57
|
||||
- package: github.com/openshift/library-go
|
||||
version: release-3.11
|
||||
- package: github.com/openshift/api
|
||||
version: master
|
||||
@@ -3,57 +3,76 @@ package callbacks
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
apps_v1beta1 "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
openshiftv1 "github.com/openshift/api/apps/v1"
|
||||
)
|
||||
|
||||
//ItemsFunc is a generic function to return a specific resource array in given namespace
|
||||
type ItemsFunc func(kubernetes.Interface, string) []interface{}
|
||||
type ItemsFunc func(kube.Clients, string) []interface{}
|
||||
|
||||
//ContainersFunc is a generic func to return containers
|
||||
type ContainersFunc func(interface{}) []v1.Container
|
||||
|
||||
//InitContainersFunc is a generic func to return containers
|
||||
type InitContainersFunc func(interface{}) []v1.Container
|
||||
|
||||
//VolumesFunc is a generic func to return volumes
|
||||
type VolumesFunc func(interface{}) []v1.Volume
|
||||
|
||||
//UpdateFunc performs the resource update
|
||||
type UpdateFunc func(kubernetes.Interface, string, interface{}) error
|
||||
type UpdateFunc func(kube.Clients, string, interface{}) error
|
||||
|
||||
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
|
||||
type RollingUpgradeFuncs struct {
|
||||
ItemsFunc ItemsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
ResourceType string
|
||||
ItemsFunc ItemsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
InitContainersFunc InitContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
VolumesFunc VolumesFunc
|
||||
ResourceType string
|
||||
}
|
||||
|
||||
// GetDeploymentItems returns the deployments in given namespace
|
||||
func GetDeploymentItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
deployments, err := client.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
|
||||
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
|
||||
deployments, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list deployments %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(deployments.Items)
|
||||
}
|
||||
|
||||
// GetDaemonSetItems returns the daemonSet in given namespace
|
||||
func GetDaemonSetItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
daemonSets, err := client.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
|
||||
// GetDaemonSetItems returns the daemonSets in given namespace
|
||||
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
|
||||
daemonSets, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list daemonSets %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(daemonSets.Items)
|
||||
}
|
||||
|
||||
// GetStatefulSetItems returns the statefulSet in given namespace
|
||||
func GetStatefulSetItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
statefulSets, err := client.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
|
||||
// GetStatefulSetItems returns the statefulSets in given namespace
|
||||
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
|
||||
statefulSets, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list statefulSets %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(statefulSets.Items)
|
||||
}
|
||||
|
||||
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
|
||||
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
|
||||
deploymentConfigs, err := clients.OpenshiftAppsClient.Apps().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list deploymentConfigs %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(deploymentConfigs.Items)
|
||||
}
|
||||
|
||||
// GetDeploymentContainers returns the containers of given deployment
|
||||
func GetDeploymentContainers(item interface{}) []v1.Container {
|
||||
return item.(v1beta1.Deployment).Spec.Template.Spec.Containers
|
||||
@@ -69,23 +88,75 @@ func GetStatefulsetContainers(item interface{}) []v1.Container {
|
||||
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
|
||||
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
|
||||
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetDeploymentInitContainers returns the containers of given deployment
|
||||
func GetDeploymentInitContainers(item interface{}) []v1.Container {
|
||||
return item.(v1beta1.Deployment).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetDaemonSetInitContainers returns the containers of given daemonset
|
||||
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
|
||||
return item.(v1beta1.DaemonSet).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetStatefulsetInitContainers returns the containers of given statefulSet
|
||||
func GetStatefulsetInitContainers(item interface{}) []v1.Container {
|
||||
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
|
||||
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
|
||||
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// UpdateDeployment performs rolling upgrade on deployment
|
||||
func UpdateDeployment(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
|
||||
deployment := resource.(v1beta1.Deployment)
|
||||
_, err := client.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
|
||||
_, err := clients.KubernetesClient.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateDaemonSet performs rolling upgrade on daemonSet
|
||||
func UpdateDaemonSet(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
|
||||
daemonSet := resource.(v1beta1.DaemonSet)
|
||||
_, err := client.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
|
||||
_, err := clients.KubernetesClient.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateStatefulset performs rolling upgrade on statefulSet
|
||||
func UpdateStatefulset(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
func UpdateStatefulset(clients kube.Clients, namespace string, resource interface{}) error {
|
||||
statefulSet := resource.(apps_v1beta1.StatefulSet)
|
||||
_, err := client.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
|
||||
_, err := clients.KubernetesClient.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
|
||||
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
|
||||
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
|
||||
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDeploymentVolumes returns the Volumes of given deployment
|
||||
func GetDeploymentVolumes(item interface{}) []v1.Volume {
|
||||
return item.(v1beta1.Deployment).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetDaemonSetVolumes returns the Volumes of given daemonset
|
||||
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
|
||||
return item.(v1beta1.DaemonSet).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetStatefulsetVolumes returns the Volumes of given statefulSet
|
||||
func GetStatefulsetVolumes(item interface{}) []v1.Volume {
|
||||
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
|
||||
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
|
||||
return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stakater/Reloader/internal/pkg/controller"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// NewReloaderCommand starts the reloader controller
|
||||
@@ -17,10 +19,20 @@ func NewReloaderCommand() *cobra.Command {
|
||||
Short: "A watcher for your Kubernetes cluster",
|
||||
Run: startReloader,
|
||||
}
|
||||
|
||||
// options
|
||||
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
|
||||
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
|
||||
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in configmaps or secrets automatically")
|
||||
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func startReloader(cmd *cobra.Command, args []string) {
|
||||
var ignoreList util.List
|
||||
var err error
|
||||
|
||||
logrus.Info("Starting Reloader")
|
||||
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
|
||||
if len(currentNamespace) == 0 {
|
||||
@@ -29,12 +41,31 @@ func startReloader(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
|
||||
// create the clientset
|
||||
clientset, err := kube.GetClient()
|
||||
clientset, err := kube.GetKubernetesClient()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
ignoreList, err = cmd.Flags().GetStringSlice("resources-to-ignore")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
for _, v := range ignoreList {
|
||||
if v != "configMaps" && v != "secrets" {
|
||||
logrus.Fatalf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
|
||||
}
|
||||
}
|
||||
|
||||
if len(ignoreList) > 1 {
|
||||
logrus.Fatal("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
|
||||
}
|
||||
|
||||
for k := range kube.ResourceMap {
|
||||
if ignoreList.Contains(k) {
|
||||
continue
|
||||
}
|
||||
|
||||
c, err := controller.NewController(clientset, k, currentNamespace)
|
||||
if err != nil {
|
||||
logrus.Fatalf("%s", err)
|
||||
@@ -43,7 +74,7 @@ func startReloader(cmd *cobra.Command, args []string) {
|
||||
// Now let's start the controller
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
|
||||
logrus.Infof("Starting Controller to watch resource type: %s", k)
|
||||
go c.Run(1, stop)
|
||||
}
|
||||
|
||||
|
||||
15
internal/pkg/constants/enums.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package constants
|
||||
|
||||
// Result is a status for deployment update
|
||||
type Result int
|
||||
|
||||
const (
|
||||
// Updated is returned when environment variable is created/updated
|
||||
Updated Result = 1 + iota
|
||||
// NotUpdated is returned when the environment variable is found and already has a value equal to the new one
|
||||
NotUpdated
|
||||
// NoEnvVarFound is returned when no environment variable is found
|
||||
NoEnvVarFound
|
||||
// NoContainerFound is returned when no matching container is found
|
||||
NoContainerFound
|
||||
)
|
||||
@@ -64,13 +64,11 @@ func (c *Controller) Update(old interface{}, new interface{}) {
|
||||
|
||||
// Delete function to add an object to the queue in case of deleting a resource
|
||||
func (c *Controller) Delete(old interface{}) {
|
||||
logrus.Infof("Resource deletion has been detected but no further implementation found to take action")
|
||||
// Todo: Any future delete event can be handled here
|
||||
}
|
||||
|
||||
//Run function for controller which handles the queue
|
||||
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
|
||||
|
||||
logrus.Infof("Starting Controller")
|
||||
defer runtime.HandleCrash()
|
||||
|
||||
// Let the workers stop when we are done
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,6 +2,8 @@ package handler
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// ResourceCreatedHandler contains new objects
|
||||
@@ -13,6 +15,24 @@ type ResourceCreatedHandler struct {
|
||||
func (r ResourceCreatedHandler) Handle() error {
|
||||
if r.Resource == nil {
|
||||
logrus.Errorf("Resource creation handler received nil resource")
|
||||
} else {
|
||||
config, _ := r.GetConfig()
|
||||
// process resource based on its type
|
||||
doRollingUpgrade(config)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
|
||||
func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
|
||||
var oldSHAData string
|
||||
var config util.Config
|
||||
if _, ok := r.Resource.(*v1.ConfigMap); ok {
|
||||
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
|
||||
} else if _, ok := r.Resource.(*v1.Secret); ok {
|
||||
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
|
||||
} else {
|
||||
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
|
||||
}
|
||||
return config, oldSHAData
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
)
|
||||
|
||||
// ResourceHandler handles the creation and update of resources
|
||||
type ResourceHandler interface {
|
||||
Handle() error
|
||||
GetConfig() (util.Config, string)
|
||||
}
|
||||
|
||||
@@ -1,17 +1,9 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/crypto"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// ResourceUpdatedHandler contains updated objects
|
||||
@@ -25,162 +17,27 @@ func (r ResourceUpdatedHandler) Handle() error {
|
||||
if r.Resource == nil || r.OldResource == nil {
|
||||
logrus.Errorf("Resource update handler received nil resource")
|
||||
} else {
|
||||
config, envVarPostfix, oldSHAData := getConfig(r)
|
||||
config, oldSHAData := r.GetConfig()
|
||||
if config.SHAValue != oldSHAData {
|
||||
logrus.Infof("Changes detected in %s of type '%s' in namespace: %s", config.ResourceName, envVarPostfix, config.Namespace)
|
||||
// process resource based on its type
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
})
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
})
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
})
|
||||
doRollingUpgrade(config)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func rollingUpgrade(r ResourceUpdatedHandler, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) {
|
||||
client, err := kube.GetClient()
|
||||
if err != nil {
|
||||
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
|
||||
}
|
||||
|
||||
err = PerformRollingUpgrade(client, config, envarPostfix, upgradeFuncs)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for %s failed with error = %v", config.ResourceName, err)
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(r ResourceUpdatedHandler) (util.Config, string, string) {
|
||||
var oldSHAData, envVarPostfix string
|
||||
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
|
||||
func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
|
||||
var oldSHAData string
|
||||
var config util.Config
|
||||
if _, ok := r.Resource.(*v1.ConfigMap); ok {
|
||||
oldSHAData = getSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
|
||||
config = getConfigmapConfig(r)
|
||||
envVarPostfix = constants.ConfigmapEnvVarPostfix
|
||||
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
|
||||
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
|
||||
} else if _, ok := r.Resource.(*v1.Secret); ok {
|
||||
oldSHAData = getSHAfromSecret(r.OldResource.(*v1.Secret).Data)
|
||||
config = getSecretConfig(r)
|
||||
envVarPostfix = constants.SecretEnvVarPostfix
|
||||
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
|
||||
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
|
||||
} else {
|
||||
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
|
||||
}
|
||||
return config, envVarPostfix, oldSHAData
|
||||
}
|
||||
|
||||
func getConfigmapConfig(r ResourceUpdatedHandler) util.Config {
|
||||
configmap := r.Resource.(*v1.ConfigMap)
|
||||
return util.Config{
|
||||
Namespace: configmap.Namespace,
|
||||
ResourceName: configmap.Name,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
SHAValue: getSHAfromConfigmap(configmap.Data),
|
||||
}
|
||||
}
|
||||
|
||||
func getSecretConfig(r ResourceUpdatedHandler) util.Config {
|
||||
secret := r.Resource.(*v1.Secret)
|
||||
return util.Config{
|
||||
Namespace: secret.Namespace,
|
||||
ResourceName: secret.Name,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
SHAValue: getSHAfromSecret(secret.Data),
|
||||
}
|
||||
}
|
||||
|
||||
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
|
||||
func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
|
||||
items := upgradeFuncs.ItemsFunc(client, config.Namespace)
|
||||
var err error
|
||||
for _, i := range items {
|
||||
containers := upgradeFuncs.ContainersFunc(i)
|
||||
resourceName := util.ToObjectMeta(i).Name
|
||||
// find correct annotation and update the resource
|
||||
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
|
||||
if annotationValue != "" {
|
||||
values := strings.Split(annotationValue, ",")
|
||||
for _, value := range values {
|
||||
if value == config.ResourceName {
|
||||
updated := updateContainers(containers, value, config.SHAValue, envarPostfix)
|
||||
if !updated {
|
||||
logrus.Warnf("Rolling upgrade failed because no container found to add environment variable in %s of type %s in namespace: %s", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
} else {
|
||||
err = upgradeFuncs.UpdateFunc(client, config.Namespace, i)
|
||||
if err != nil {
|
||||
logrus.Errorf("Update for %s of type %s in namespace %s failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
|
||||
} else {
|
||||
logrus.Infof("Updated %s of type %s in namespace: %s ", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func updateContainers(containers []v1.Container, annotationValue string, shaData string, envarPostfix string) bool {
|
||||
updated := false
|
||||
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue)+ "_" + envarPostfix
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
|
||||
//update if env var exists
|
||||
updated = updateEnvVar(envs, envar, shaData)
|
||||
|
||||
// if no existing env var exists lets create one
|
||||
if !updated {
|
||||
e := v1.EnvVar{
|
||||
Name: envar,
|
||||
Value: shaData,
|
||||
}
|
||||
containers[i].Env = append(containers[i].Env, e)
|
||||
updated = true
|
||||
}
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
func updateEnvVar(envs []v1.EnvVar, envar string, shaData string) bool {
|
||||
for j := range envs {
|
||||
if envs[j].Name == envar {
|
||||
if envs[j].Value != shaData {
|
||||
envs[j].Value = shaData
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getSHAfromConfigmap(data map[string]string) string {
|
||||
values := []string{}
|
||||
for k, v := range data {
|
||||
values = append(values, k+"="+v)
|
||||
}
|
||||
sort.Strings(values)
|
||||
return crypto.GenerateSHA(strings.Join(values, ";"))
|
||||
}
|
||||
|
||||
func getSHAfromSecret(data map[string][]byte) string {
|
||||
values := []string{}
|
||||
for k, v := range data {
|
||||
values = append(values, k+"="+string(v[:]))
|
||||
}
|
||||
sort.Strings(values)
|
||||
return crypto.GenerateSHA(strings.Join(values, ";"))
|
||||
return config, oldSHAData
|
||||
}
|
||||
|
||||
@@ -1,315 +0,0 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
testclient "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
var (
|
||||
client = testclient.NewSimpleClientset()
|
||||
namespace = "test-handler-" + testutil.RandSeq(5)
|
||||
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
|
||||
secretName = "testsecret-handler-" + testutil.RandSeq(5)
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
// Creating namespace
|
||||
testutil.CreateNamespace(namespace, client)
|
||||
|
||||
logrus.Infof("Setting up the test resources")
|
||||
setup()
|
||||
|
||||
logrus.Infof("Running Testcases")
|
||||
retCode := m.Run()
|
||||
|
||||
logrus.Infof("tearing down the test resources")
|
||||
teardown()
|
||||
|
||||
os.Exit(retCode)
|
||||
}
|
||||
|
||||
func setup() {
|
||||
// Creating configmap
|
||||
_, err := testutil.CreateConfigMap(client, namespace, configmapName, "www.google.com")
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating secret
|
||||
data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
|
||||
_, err = testutil.CreateSecret(client, namespace, secretName, data)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating Deployment with configmap
|
||||
_, err = testutil.CreateDeployment(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating Deployment with secret
|
||||
_, err = testutil.CreateDeployment(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in Deployment with secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating DaemonSet with configmap
|
||||
_, err = testutil.CreateDaemonSet(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating DaemonSet with secret
|
||||
_, err = testutil.CreateDaemonSet(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating StatefulSet with configmap
|
||||
_, err = testutil.CreateStatefulSet(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating StatefulSet with secret
|
||||
_, err = testutil.CreateStatefulSet(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func teardown() {
|
||||
// Deleting Deployment with configmap
|
||||
deploymentError := testutil.DeleteDeployment(client, namespace, configmapName)
|
||||
if deploymentError != nil {
|
||||
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
deploymentError = testutil.DeleteDeployment(client, namespace, secretName)
|
||||
if deploymentError != nil {
|
||||
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
|
||||
}
|
||||
|
||||
// Deleting DaemonSet with configmap
|
||||
daemonSetError := testutil.DeleteDaemonSet(client, namespace, configmapName)
|
||||
if daemonSetError != nil {
|
||||
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
daemonSetError = testutil.DeleteDaemonSet(client, namespace, secretName)
|
||||
if daemonSetError != nil {
|
||||
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
|
||||
}
|
||||
|
||||
// Deleting StatefulSet with configmap
|
||||
statefulSetError := testutil.DeleteStatefulSet(client, namespace, configmapName)
|
||||
if statefulSetError != nil {
|
||||
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
statefulSetError = testutil.DeleteStatefulSet(client, namespace, secretName)
|
||||
if statefulSetError != nil {
|
||||
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
|
||||
}
|
||||
|
||||
// Deleting Configmap
|
||||
err := testutil.DeleteConfigMap(client, namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the configmap %v", err)
|
||||
}
|
||||
|
||||
// Deleting Secret
|
||||
err = testutil.DeleteSecret(client, namespace, secretName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the secret %v", err)
|
||||
}
|
||||
|
||||
// Deleting namespace
|
||||
testutil.DeleteNamespace(namespace, client)
|
||||
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
if !updated {
|
||||
t.Errorf("Deployment was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for Deployment with Secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
|
||||
if !updated {
|
||||
t.Errorf("Deployment was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
daemonSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying daemonSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("DaemonSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
daemonSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying daemonSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("DaemonSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
statefulSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying statefulSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("StatefulSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
statefulSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying statefulSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("StatefulSet was not updated")
|
||||
}
|
||||
}
|
||||
251
internal/pkg/handler/upgrade.go
Normal file
@@ -0,0 +1,251 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
|
||||
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
InitContainersFunc: callbacks.GetDeploymentInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
VolumesFunc: callbacks.GetDeploymentVolumes,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
|
||||
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
VolumesFunc: callbacks.GetDaemonSetVolumes,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
}
|
||||
|
||||
// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
|
||||
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
InitContainersFunc: callbacks.GetStatefulsetInitContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
VolumesFunc: callbacks.GetStatefulsetVolumes,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
}
|
||||
|
||||
// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
|
||||
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentConfigItems,
|
||||
ContainersFunc: callbacks.GetDeploymentConfigContainers,
|
||||
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDeploymentConfig,
|
||||
VolumesFunc: callbacks.GetDeploymentConfigVolumes,
|
||||
ResourceType: "DeploymentConfig",
|
||||
}
|
||||
}
|
||||
|
||||
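// doRollingUpgrade runs the rolling-upgrade logic for every supported workload type (Deployments, DaemonSets, StatefulSets and, on OpenShift, DeploymentConfigs) for the changed configmap or secret described by config.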
func doRollingUpgrade(config util.Config) {
|
||||
clients := kube.GetClients()
|
||||
|
||||
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs())
|
||||
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs())
|
||||
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs())
|
||||
|
||||
if kube.IsOpenshift {
|
||||
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs())
|
||||
}
|
||||
}
|
||||
|
||||
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
|
||||
|
||||
err := PerformRollingUpgrade(clients, config, upgradeFuncs)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
|
||||
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
|
||||
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
|
||||
var err error
|
||||
for _, i := range items {
|
||||
// find correct annotation and update the resource
|
||||
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
|
||||
reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
|
||||
result := constants.NotUpdated
|
||||
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
|
||||
if err == nil && reloaderEnabled {
|
||||
result = updateContainers(upgradeFuncs, i, config, true)
|
||||
}
|
||||
|
||||
if result != constants.Updated && annotationValue != "" {
|
||||
values := strings.Split(annotationValue, ",")
|
||||
for _, value := range values {
|
||||
if value == config.ResourceName {
|
||||
result = updateContainers(upgradeFuncs, i, config, false)
|
||||
if result == constants.Updated {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if result == constants.Updated {
|
||||
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
|
||||
resourceName := util.ToObjectMeta(i).Name
|
||||
if err != nil {
|
||||
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
|
||||
} else {
|
||||
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
|
||||
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
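// getVolumeMountName returns the name of the pod volume that references the given configmap or secret, or an empty string if no volume matches.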
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
|
||||
for i := range volumes {
|
||||
if mountType == constants.ConfigmapEnvVarPostfix && volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
|
||||
return volumes[i].Name
|
||||
} else if mountType == constants.SecretEnvVarPostfix && volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
|
||||
return volumes[i].Name
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
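// getContainerWithVolumeMount returns the first container that mounts the given volume, or nil if none does.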
func getContainerWithVolumeMount(containers []v1.Container, volumeMountName string) *v1.Container {
|
||||
for i := range containers {
|
||||
volumeMounts := containers[i].VolumeMounts
|
||||
for j := range volumeMounts {
|
||||
if volumeMounts[j].Name == volumeMountName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
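// getContainerWithEnvReference returns the first container that references the given configmap or secret through an env valueFrom or envFrom entry, or nil if none does.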
func getContainerWithEnvReference(containers []v1.Container, resourceName string, resourceType string) *v1.Container {
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
for j := range envs {
|
||||
envVarSource := envs[j].ValueFrom
|
||||
if envVarSource != nil {
|
||||
if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
} else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
envsFrom := containers[i].EnvFrom
|
||||
for j := range envsFrom {
|
||||
if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
} else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
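// getContainerToUpdate picks the container that should carry the reloader environment variable:
// a container mounting the resource as a volume, then one referencing it through env vars
// (falling back to the first pod container when only an init container uses it), and finally the
// first container when an explicit reload annotation is used. It returns nil if none qualifies.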
func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
|
||||
volumes := upgradeFuncs.VolumesFunc(item)
|
||||
containers := upgradeFuncs.ContainersFunc(item)
|
||||
initContainers := upgradeFuncs.InitContainersFunc(item)
|
||||
var container *v1.Container
|
||||
// Get the volumeMountName to find volumeMount in container
|
||||
volumeMountName := getVolumeMountName(volumes, config.Type, config.ResourceName)
|
||||
// Get the container with mounted configmap/secret
|
||||
if volumeMountName != "" {
|
||||
container = getContainerWithVolumeMount(containers, volumeMountName)
|
||||
if container == nil && len(initContainers) > 0 {
|
||||
container = getContainerWithVolumeMount(initContainers, volumeMountName)
|
||||
if container != nil {
|
||||
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
|
||||
return &containers[0]
|
||||
}
|
||||
} else if container != nil {
|
||||
return container
|
||||
}
|
||||
}
|
||||
|
||||
// Get the container with referenced secret or configmap as env var
|
||||
container = getContainerWithEnvReference(containers, config.ResourceName, config.Type)
|
||||
if container == nil && len(initContainers) > 0 {
|
||||
container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
|
||||
if container != nil {
|
||||
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
|
||||
return &containers[0]
|
||||
}
|
||||
}
|
||||
|
||||
// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
|
||||
if container == nil && !autoReload {
|
||||
return &containers[0]
|
||||
}
|
||||
|
||||
return container
|
||||
}
|
||||
|
||||
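// updateContainers sets the reloader environment variable (derived from the resource name and type, holding the new SHA) on the chosen container and reports the outcome as a constants.Result.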
func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
|
||||
var result constants.Result
|
||||
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
|
||||
container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
|
||||
|
||||
if container == nil {
|
||||
return constants.NoContainerFound
|
||||
}
|
||||
|
||||
//update if env var exists
|
||||
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
|
||||
|
||||
// if no existing env var exists lets create one
|
||||
if result == constants.NoEnvVarFound {
|
||||
e := v1.EnvVar{
|
||||
Name: envar,
|
||||
Value: config.SHAValue,
|
||||
}
|
||||
container.Env = append(container.Env, e)
|
||||
result = constants.Updated
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
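// updateEnvVar updates the given environment variable to the new SHA value in whichever container already defines it, returning Updated, NotUpdated, or NoEnvVarFound accordingly.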
func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
for j := range envs {
|
||||
if envs[j].Name == envar {
|
||||
if envs[j].Value != shaData {
|
||||
envs[j].Value = shaData
|
||||
return constants.Updated
|
||||
}
|
||||
return constants.NotUpdated
|
||||
}
|
||||
}
|
||||
}
|
||||
return constants.NoEnvVarFound
|
||||
}
|
||||
669
internal/pkg/handler/upgrade_test.go
Normal file
@@ -0,0 +1,669 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
testclient "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
var (
|
||||
clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
|
||||
namespace = "test-handler-" + testutil.RandSeq(5)
|
||||
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
|
||||
secretName = "testsecret-handler-" + testutil.RandSeq(5)
|
||||
configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
|
||||
secretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
|
||||
configmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
|
||||
secretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
|
||||
configmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
|
||||
configmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
|
||||
secretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
|
||||
secretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
// Creating namespace
|
||||
testutil.CreateNamespace(namespace, clients.KubernetesClient)
|
||||
|
||||
logrus.Infof("Setting up the test resources")
|
||||
setup()
|
||||
|
||||
logrus.Infof("Running Testcases")
|
||||
retCode := m.Run()
|
||||
|
||||
logrus.Infof("tearing down the test resources")
|
||||
teardown()
|
||||
|
||||
os.Exit(retCode)
|
||||
}
|
||||
|
||||
func setup() {
	// Creating configmap
	_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithEnvName, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvFromName, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithInitEnv, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithInitContainer, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithEnvFromName, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithInitEnv, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretWithInitContainer, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}

	// Creating Deployment with configmap
	_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
	}

	// Creating Deployment with configmap mounted in init container
	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, configmapWithInitContainer, namespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
	}

	// Creating Deployment with secret mounted in init container
	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret creation: %v", err)
	}

	// Creating Deployment with configmap mounted as Env in init container
	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, configmapWithInitEnv, namespace, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
	}

	// Creating Deployment with secret mounted as Env in init container
	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitEnv, namespace, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret creation: %v", err)
	}

	// Creating Deployment with secret
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret creation: %v", err)
	}

	// Creating Deployment with env var source as configmap
	_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap as env var source creation: %v", err)
	}

	// Creating Deployment with env var source as secret
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret as env var source creation: %v", err)
	}

	// Creating Deployment with envFrom source as configmap
	_, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, configmapWithEnvFromName, namespace)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap as envFrom source creation: %v", err)
	}

	// Creating Deployment with envFrom source as secret
	_, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, secretWithEnvFromName, namespace)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret as envFrom source creation: %v", err)
	}

	// Creating DaemonSet with configmap
	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
	}

	// Creating DaemonSet with secret
	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
	}

	// Creating DaemonSet with env var source as configmap
	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err)
	}

	// Creating DaemonSet with env var source as secret
	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in DaemonSet with secret as env var source creation: %v", err)
	}

	// Creating StatefulSet with configmap
	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
	}

	// Creating StatefulSet with secret
	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true)
	if err != nil {
		logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
	}

	// Creating StatefulSet with env var source as configmap
	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in StatefulSet with configmap as env var source creation: %v", err)
	}

	// Creating StatefulSet with env var source as secret
	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretWithEnvName, namespace, false)
	if err != nil {
		logrus.Errorf("Error in StatefulSet with secret as env var source creation: %v", err)
	}
}

func teardown() {
	// Deleting Deployment with configmap
	deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Deployment with secret
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
	}

	// Deleting Deployment with configmap as env var source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError)
	}

	// Deleting Deployment with secret as env var source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithEnvName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError)
	}

	// Deleting Deployment with configmap mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithInitContainer)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError)
	}

	// Deleting Deployment with secret mounted in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithInitContainer)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError)
	}

	// Deleting Deployment with configmap mounted as env in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithInitEnv)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError)
	}

	// Deleting Deployment with secret mounted as env in init container
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithInitEnv)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError)
	}

	// Deleting Deployment with configmap as envFrom source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvFromName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError)
	}

	// Deleting Deployment with secret as envFrom source
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretWithEnvFromName)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
	}

	// Deleting DaemonSet with configmap
	daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
	}

	// Deleting DaemonSet with secret
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
	}

	// Deleting DaemonSet with configmap as env var source
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError)
	}

	// Deleting DaemonSet with secret as env var source
	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretWithEnvName)
	if daemonSetError != nil {
		logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError)
	}

	// Deleting StatefulSet with configmap
	statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
	}

	// Deleting StatefulSet with secret
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
	}

	// Deleting StatefulSet with configmap as env var source
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError)
	}

	// Deleting StatefulSet with secret as env var source
	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretWithEnvName)
	if statefulSetError != nil {
		logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
	}

	// Deleting Configmap
	err := testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	// Deleting Secret
	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName)
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}

	// Deleting Configmap used as env var source
	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap used as env var source %v", err)
	}

	// Deleting Secret used as env var source
	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithEnvName)
	if err != nil {
		logrus.Errorf("Error while deleting the secret used as env var source %v", err)
	}

	// Deleting Configmap used in init container
	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithInitContainer)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap used in init container %v", err)
	}

	// Deleting Secret used in init container
	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithInitContainer)
	if err != nil {
		logrus.Errorf("Error while deleting the secret used in init container %v", err)
	}

	// Deleting Configmap used as envFrom source
	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvFromName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap used as envFrom source %v", err)
	}

	// Deleting Secret used as envFrom source
	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithEnvFromName)
	if err != nil {
		logrus.Errorf("Error while deleting the secret used as envFrom source %v", err)
	}

	// Deleting Configmap used as env var source in init container
	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithInitEnv)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err)
	}

	// Deleting Secret used as env var source in init container
	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretWithInitEnv)
	if err != nil {
		logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err)
	}

	// Deleting namespace
	testutil.DeleteNamespace(namespace, clients.KubernetesClient)
}

func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string) util.Config {
	return util.Config{
		Namespace:    namespace,
		ResourceName: name,
		SHAValue:     shaData,
		Annotation:   annotation,
		Type:         resourceType,
	}
}

func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

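// Editor's illustrative sketch (assumption, not part of this change): every test below follows the
// pattern above, and "updated" means the patched pod template now carries a STAKATER_* env var whose
// value is the new SHA of the changed resource. Roughly:
//
//	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
//	envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(configmapName) + "_" + constants.ConfigmapEnvVarPostfix
//	// testutil.VerifyResourceUpdate returns true once some container exposes envName with value shaData.
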
func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitEnv, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitEnv, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvFromName, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as envFrom source")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitEnv, shaData, options.ReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Secret")
	}

	logrus.Infof("Verifying deployment update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
}

func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
	}

	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
}

func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
	}

	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
}

func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with secret")
	}

	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
}

func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
	}

	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
}

func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()

	err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with secret")
	}

	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
}

@@ -1,8 +1,10 @@
package constants
package options

const (
var (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to reload a workload automatically when any of its configmaps or secrets change
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
)
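// Editor's illustrative sketch (assumption, not part of this change): these annotation keys are what
// users place on a workload's metadata. A Deployment that should restart when the "foo-config"
// configmap or the "foo-secret" secret changes would carry, for example:
//
//	Annotations: map[string]string{
//		options.ConfigmapUpdateOnChangeAnnotation: "foo-config",
//		options.SecretUpdateOnChangeAnnotation:    "foo-secret",
//	}
//
// while setting options.ReloaderAutoAnnotation to "true" opts the workload into automatic reloads
// for every configmap or secret it references.
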
@@ -3,17 +3,21 @@ package testutil
import (
	"math/rand"
	"sort"
	"strconv"
	"strings"
	"time"

	openshiftv1 "github.com/openshift/api/apps/v1"
	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1_beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
@@ -28,19 +32,11 @@ var (
	SecretResourceType = "secrets"
)

func GetClient() *kubernetes.Clientset {
	newClient, err := kube.GetClient()
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
	return newClient
}

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
	if err != nil {
		logrus.Fatalf("Failed to create namespace for testing", err)
		logrus.Fatalf("Failed to create namespace for testing %v", err)
	} else {
		logrus.Infof("Creating namespace for testing = %s", namespace)
	}
@@ -50,48 +46,331 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
func DeleteNamespace(namespace string, client kubernetes.Interface) {
	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
	if err != nil {
		logrus.Fatalf("Failed to delete namespace that was created for testing", err)
		logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
	} else {
		logrus.Infof("Deleting namespace for testing = %s", namespace)
	}
}

func getObjectMeta(namespace string, name string, autoReload bool) metav1.ObjectMeta {
	return metav1.ObjectMeta{
		Name:        name,
		Namespace:   namespace,
		Labels:      map[string]string{"firstLabel": "temp"},
		Annotations: getAnnotations(name, autoReload),
	}
}

func getAnnotations(name string, autoReload bool) map[string]string {
	if autoReload {
		return map[string]string{
			options.ReloaderAutoAnnotation: "true"}
	}

	return map[string]string{
		options.ConfigmapUpdateOnChangeAnnotation: name,
		options.SecretUpdateOnChangeAnnotation:    name}
}

func getEnvVarSources(name string) []v1.EnvFromSource {
	return []v1.EnvFromSource{
		{
			ConfigMapRef: &v1.ConfigMapEnvSource{
				LocalObjectReference: v1.LocalObjectReference{
					Name: name,
				},
			},
		},
		{
			SecretRef: &v1.SecretEnvSource{
				LocalObjectReference: v1.LocalObjectReference{
					Name: name,
				},
			},
		},
	}
}

func getVolumes(name string) []v1.Volume {
	return []v1.Volume{
		{
			Name: "configmap",
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: name,
					},
				},
			},
		},
		{
			Name: "secret",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: name,
				},
			},
		},
	}
}

func getVolumeMounts(name string) []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			MountPath: "etc/config",
			Name:      "configmap",
		},
		{
			MountPath: "etc/sec",
			Name:      "secret",
		},
	}
}

func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec {
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"secondLabel": "temp"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Image: "tutum/hello-world",
					Name:  name,
					Env: []v1.EnvVar{
						{
							Name:  "BUCKET_NAME",
							Value: "test",
						},
						{
							Name: "CONFIGMAP_" + util.ConvertToEnvVarName(name),
							ValueFrom: &v1.EnvVarSource{
								ConfigMapKeyRef: &v1.ConfigMapKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: name,
									},
									Key: "test.url",
								},
							},
						},
						{
							Name: "SECRET_" + util.ConvertToEnvVarName(name),
							ValueFrom: &v1.EnvVarSource{
								SecretKeyRef: &v1.SecretKeySelector{
									LocalObjectReference: v1.LocalObjectReference{
										Name: name,
									},
									Key: "test.url",
								},
							},
						},
					},
				},
			},
		},
	}
}

func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec {
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"secondLabel": "temp"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Image:   "tutum/hello-world",
					Name:    name,
					EnvFrom: getEnvVarSources(name),
				},
			},
		},
	}
}

func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"secondLabel": "temp"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Image: "tutum/hello-world",
					Name:  name,
					Env: []v1.EnvVar{
						{
							Name:  "BUCKET_NAME",
							Value: "test",
						},
					},
					VolumeMounts: getVolumeMounts(name),
				},
			},
			Volumes: getVolumes(name),
		},
	}
}

func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"secondLabel": "temp"},
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{
					Image:        "busybox",
					Name:         "busyBox",
					VolumeMounts: getVolumeMounts(name),
				},
			},
			Containers: []v1.Container{
				{
					Image: "tutum/hello-world",
					Name:  name,
					Env: []v1.EnvVar{
						{
							Name:  "BUCKET_NAME",
							Value: "test",
						},
					},
				},
			},
			Volumes: getVolumes(name),
		},
	}
}

func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
	return v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{"secondLabel": "temp"},
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{
					Image:   "busybox",
					Name:    "busyBox",
					EnvFrom: getEnvVarSources(name),
				},
			},
			Containers: []v1.Container{
				{
					Image: "tutum/hello-world",
					Name:  name,
					Env: []v1.EnvVar{
						{
							Name:  "BUCKET_NAME",
							Value: "test",
						},
					},
				},
			},
		},
	}
}

// GetDeployment provides deployment for testing
func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      deploymentName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: deploymentName,
				constants.SecretUpdateOnChangeAnnotation:    deploymentName},
		},
		ObjectMeta: getObjectMeta(namespace, deploymentName, false),
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  deploymentName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
			Template: getPodTemplateSpecWithVolumes(deploymentName),
		},
	}
}

// GetDeploymentConfig provides deploymentConfig for testing
func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
	replicaset := int32(1)
	podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
	return &openshiftv1.DeploymentConfig{
		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
		Spec: openshiftv1.DeploymentConfigSpec{
			Replicas: replicaset,
			Strategy: openshiftv1.DeploymentStrategy{
				Type: openshiftv1.DeploymentStrategyTypeRolling,
			},
			Template: &podTemplateSpecWithVolume,
		},
	}
}

// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: getObjectMeta(namespace, deploymentName, false),
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: getPodTemplateSpecWithInitContainer(deploymentName),
		},
	}
}

// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: getObjectMeta(namespace, deploymentName, true),
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName),
		},
	}
}

func GetDeploymentWithEnvVars(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: getObjectMeta(namespace, deploymentName, true),
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: getPodTemplateSpecWithEnvVars(deploymentName),
		},
	}
}

func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
	replicaset := int32(1)
	podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
	return &openshiftv1.DeploymentConfig{
		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
		Spec: openshiftv1.DeploymentConfigSpec{
			Replicas: replicaset,
			Strategy: openshiftv1.DeploymentStrategy{
				Type: openshiftv1.DeploymentStrategyTypeRolling,
			},
			Template: &podTemplateSpecWithEnvVars,
		},
	}
}

func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: getObjectMeta(namespace, deploymentName, true),
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
		},
	}
}

@@ -99,37 +378,24 @@ func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
	return &v1beta1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      daemonsetName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: daemonsetName,
				constants.SecretUpdateOnChangeAnnotation:    daemonsetName},
		},
		ObjectMeta: getObjectMeta(namespace, daemonsetName, false),
		Spec: v1beta1.DaemonSetSpec{
			UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
				Type: v1beta1.RollingUpdateDaemonSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  daemonsetName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
			Template: getPodTemplateSpecWithVolumes(daemonsetName),
		},
	}
}

func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *v1beta1.DaemonSet {
	return &v1beta1.DaemonSet{
		ObjectMeta: getObjectMeta(namespace, daemonSetName, true),
		Spec: v1beta1.DaemonSetSpec{
			UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
				Type: v1beta1.RollingUpdateDaemonSetStrategyType,
			},
			Template: getPodTemplateSpecWithEnvVars(daemonSetName),
		},
	}
}
@@ -137,37 +403,25 @@ func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
	return &v1_beta1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      statefulsetName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: statefulsetName,
				constants.SecretUpdateOnChangeAnnotation:    statefulsetName},
		},
		ObjectMeta: getObjectMeta(namespace, statefulsetName, false),
		Spec: v1_beta1.StatefulSetSpec{
			UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
				Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  statefulsetName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
			Template: getPodTemplateSpecWithVolumes(statefulsetName),
		},
	}
}

// GetStatefulSetWithEnvVar provides statefulset with env var source for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
	return &v1_beta1.StatefulSet{
		ObjectMeta: getObjectMeta(namespace, statefulsetName, true),
		Spec: v1_beta1.StatefulSetSpec{
			UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
				Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
			},
			Template: getPodTemplateSpecWithEnvVars(statefulsetName),
		},
	}
}
@@ -256,7 +510,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
	logrus.Infof("Creating configmap")
	configmapClient := client.CoreV1().ConfigMaps(namespace)
	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return configmapClient, err
}

@@ -265,34 +519,92 @@ func CreateSecret(client kubernetes.Interface, namespace string, secretName stri
	logrus.Infof("Creating secret")
	secretClient := client.CoreV1().Secrets(namespace)
	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return secretClient, err
}

// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*v1beta1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
	deployment, err := deploymentClient.Create(GetDeployment(namespace, deploymentName))
	time.Sleep(10 * time.Second)
	var deploymentObj *v1beta1.Deployment
	if volumeMount {
		deploymentObj = GetDeployment(namespace, deploymentName)
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}
	deployment, err := deploymentClient.Create(deploymentObj)
	time.Sleep(3 * time.Second)
	return deployment, err
}

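// Editor's illustrative usage sketch (assumption, not part of this change): the new volumeMount flag
// picks between the two fixtures above, which is exactly how the handler tests set up their workloads:
//
//	// Deployment whose configmap/secret are mounted as volumes
//	_, err := testutil.CreateDeployment(client, resourceName, namespace, true)
//	// Deployment that consumes the same resources through env var references instead
//	_, err = testutil.CreateDeployment(client, resourceName, namespace, false)
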
// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
	logrus.Infof("Creating DeploymentConfig")
	deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace)
	var deploymentConfigObj *openshiftv1.DeploymentConfig
	if volumeMount {
		deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName)
	} else {
		deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
	}
	deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
	time.Sleep(5 * time.Second)
	return deploymentConfig, err
}

// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*v1beta1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
	var deploymentObj *v1beta1.Deployment
	if volumeMount {
		deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName)
	} else {
		deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
	}
	deployment, err := deploymentClient.Create(deploymentObj)
	time.Sleep(3 * time.Second)
	return deployment, err
}

// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
	deployment, err := deploymentClient.Create(deploymentObj)
	time.Sleep(3 * time.Second)
	return deployment, err
}

// CreateDaemonSet creates a daemonset in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string) (*v1beta1.DaemonSet, error) {
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*v1beta1.DaemonSet, error) {
	logrus.Infof("Creating DaemonSet")
	daemonsetClient := client.ExtensionsV1beta1().DaemonSets(namespace)
	daemonset, err := daemonsetClient.Create(GetDaemonSet(namespace, daemonsetName))
	time.Sleep(10 * time.Second)
	var daemonsetObj *v1beta1.DaemonSet
	if volumeMount {
		daemonsetObj = GetDaemonSet(namespace, daemonsetName)
	} else {
		daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
	}
	daemonset, err := daemonsetClient.Create(daemonsetObj)
	time.Sleep(3 * time.Second)
	return daemonset, err
}

// CreateStatefulSet creates a statefulset in given namespace and returns the StatefulSet
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string) (*v1_beta1.StatefulSet, error) {
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*v1_beta1.StatefulSet, error) {
	logrus.Infof("Creating StatefulSet")
	statefulsetClient := client.AppsV1beta1().StatefulSets(namespace)
	statefulset, err := statefulsetClient.Create(GetStatefulSet(namespace, statefulsetName))
	time.Sleep(10 * time.Second)
	var statefulsetObj *v1_beta1.StatefulSet
	if volumeMount {
		statefulsetObj = GetStatefulSet(namespace, statefulsetName)
	} else {
		statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
	}
	statefulset, err := statefulsetClient.Create(statefulsetObj)
	time.Sleep(3 * time.Second)
	return statefulset, err
}

@@ -300,15 +612,23 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
	logrus.Infof("Deleting Deployment")
	deploymentError := client.ExtensionsV1beta1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return deploymentError
}

// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
	logrus.Infof("Deleting DeploymentConfig")
	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentConfigError
}

// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
	daemonsetError := client.ExtensionsV1beta1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return daemonsetError
}

@@ -316,7 +636,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
	statefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return statefulsetError
}

@@ -330,7 +650,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
		configmap = GetConfigmap(namespace, configmapName, data)
	}
	_, updateErr := configmapClient.Update(configmap)
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return updateErr
}

@@ -344,7 +664,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
		secret = GetSecret(namespace, secretName, data)
	}
	_, updateErr := secretClient.Update(secret)
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return updateErr
}

@@ -352,7 +672,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return err
}

@@ -360,7 +680,7 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName stri
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	time.Sleep(3 * time.Second)
	return err
}

@@ -374,28 +694,33 @@ func RandSeq(n int) string {
	return string(b)
}

func VerifyResourceUpdate(client kubernetes.Interface, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(client, config.Namespace)
// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
		// match statefulsets with the correct annotation
		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
		if annotationValue != "" {
		reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		matches := false
		if err == nil && reloaderEnabled {
			matches = true
		} else if annotationValue != "" {
			values := strings.Split(annotationValue, ",")
			matches := false
			for _, value := range values {
				if value == config.ResourceName {
					matches = true
					break
				}
			}
			if matches {
				envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + "_" + envVarPostfix
				updated := GetResourceSHA(containers, envName)
		}

				if updated == config.SHAValue {
					return true
				}
		if matches {
			envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
			updated := GetResourceSHA(containers, envName)
			if updated == config.SHAValue {
				return true
			}
		}
	}

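// Editor's note with an illustrative sketch (assumption, not part of this change): after the rewrite
// a workload "matches" either because reloader.stakater.com/auto parses to true or because the reload
// annotation lists the changed resource's name. Equivalently, using the util.List helper defined in
// this change:
//
//	reloaderEnabled, _ := strconv.ParseBool(annotations[options.ReloaderAutoAnnotation]) // "true" -> true
//	values := util.List(strings.Split(annotations[config.Annotation], ","))
//	matches := reloaderEnabled || values.Contains(config.ResourceName)
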
@@ -1,9 +1,38 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
//Config contains rolling upgrade configuration parameters
|
||||
type Config struct {
|
||||
Namespace string
|
||||
ResourceName string
|
||||
Annotation string
|
||||
SHAValue string
|
||||
Type string
|
||||
}
|
||||
|
||||
// GetConfigmapConfig provides utility config for configmap
|
||||
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
|
||||
return Config{
|
||||
Namespace: configmap.Namespace,
|
||||
ResourceName: configmap.Name,
|
||||
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
|
||||
SHAValue: GetSHAfromConfigmap(configmap.Data),
|
||||
Type: constants.ConfigmapEnvVarPostfix,
|
||||
}
|
||||
}
|
||||
|
||||
// GetSecretConfig provides utility config for secret
|
||||
func GetSecretConfig(secret *v1.Secret) Config {
|
||||
return Config{
|
||||
Namespace: secret.Namespace,
|
||||
ResourceName: secret.Name,
|
||||
Annotation: options.SecretUpdateOnChangeAnnotation,
|
||||
SHAValue: GetSHAfromSecret(secret.Data),
|
||||
Type: constants.SecretEnvVarPostfix,
|
||||
}
|
||||
}
|
||||
|
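GetConfigmapConfig and GetSecretConfig collapse everything the upgrade logic needs (namespace, name, annotation key, data hash, env-var postfix) into a single Config value. A hedged sketch of building one from a ConfigMap; the util import path is an assumption, and the point is only that SHAValue changes whenever the data changes:

package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util" // assumed package path
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "my-app-config", Namespace: "default"},
		Data:       map[string]string{"LOG_LEVEL": "debug"},
	}

	before := util.GetConfigmapConfig(cm)
	fmt.Println(before.Namespace, before.ResourceName, before.Type)

	// Editing the data and rebuilding the config yields a different SHAValue,
	// which is what ultimately drives the rolling upgrade.
	cm.Data["LOG_LEVEL"] = "info"
	after := util.GetConfigmapConfig(cm)
	fmt.Println(before.SHAValue != after.SHAValue) // true
}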
@@ -2,6 +2,7 @@ package util

import (
"reflect"
"strconv"

"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,3 +37,14 @@ func ToObjectMeta(kubernetesObject interface{}) ObjectMeta {
ObjectMeta: field,
}
}

// ParseBool returns result in bool format after parsing
func ParseBool(value interface{}) bool {
if reflect.Bool == reflect.TypeOf(value).Kind() {
return value.(bool)
} else if reflect.String == reflect.TypeOf(value).Kind() {
result, _ := strconv.ParseBool(value.(string))
return result
}
return false
}

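ParseBool deliberately accepts either a native bool or a string, so callers can feed it annotation values and flags without type checks; anything else, or an unparsable string, comes back as false. For example (assuming the package is imported as util):

package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util" // assumed package path
)

func main() {
	fmt.Println(util.ParseBool(true))         // true  (native bool)
	fmt.Println(util.ParseBool("true"))       // true  (parsable string)
	fmt.Println(util.ParseBool("not-a-bool")) // false (parse error is swallowed)
	fmt.Println(util.ParseBool(42))           // false (unsupported kind)
}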
@@ -2,7 +2,10 @@ package util

import (
"bytes"
"sort"
"strings"

"github.com/stakater/Reloader/internal/pkg/crypto"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -25,3 +28,32 @@ func ConvertToEnvVarName(text string) string {
}
return buffer.String()
}

func GetSHAfromConfigmap(data map[string]string) string {
values := []string{}
for k, v := range data {
values = append(values, k+"="+v)
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}

func GetSHAfromSecret(data map[string][]byte) string {
values := []string{}
for k, v := range data {
values = append(values, k+"="+string(v[:]))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}

type List []string

func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {
return true
}
}
return false
}

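Both SHA helpers serialize the map as sorted key=value pairs joined with ";" before hashing, so the digest depends only on the contents, never on Go's randomized map iteration order. A stand-alone sketch of that property (generateSHA here is a plain SHA-1 stand-in for crypto.GenerateSHA, which is an assumption about its implementation):

package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"
)

// generateSHA stands in for crypto.GenerateSHA in this sketch.
func generateSHA(data string) string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(data)))
}

func shaFromConfigmap(data map[string]string) string {
	values := []string{}
	for k, v := range data {
		values = append(values, k+"="+v)
	}
	sort.Strings(values) // sorting makes the digest order-independent
	return generateSHA(strings.Join(values, ";"))
}

func main() {
	a := shaFromConfigmap(map[string]string{"user": "admin", "mode": "debug"})
	b := shaFromConfigmap(map[string]string{"mode": "debug", "user": "admin"})
	fmt.Println(a == b) // true: same data, same SHA
}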
@@ -3,13 +3,80 @@ package kube
import (
"os"

"k8s.io/client-go/tools/clientcmd"

appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

// GetClient gets the client for k8s, if ~/.kube/config exists so get that config else incluster config
func GetClient() (*kubernetes.Clientset, error) {
// Clients struct exposes interfaces for kubernetes as well as openshift if available
type Clients struct {
KubernetesClient kubernetes.Interface
OpenshiftAppsClient appsclient.Interface
}

var (
// IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes
IsOpenshift = isOpenshift()
)

// GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier
func GetClients() Clients {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}

var appsClient *appsclient.Clientset

if IsOpenshift {
appsClient, err = GetOpenshiftAppsClient()
if err != nil {
logrus.Warnf("Unable to create Openshift Apps client error = %v", err)
}
}

return Clients{
KubernetesClient: client,
OpenshiftAppsClient: appsClient,
}
}

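GetClients always wires up the Kubernetes client and only attaches the OpenShift apps client when the API detection below succeeds, so callers branch on kube.IsOpenshift before touching OpenshiftAppsClient. A usage sketch under those assumptions (the kube import path and the pre-context client-go call signatures are inferred from this diff, not guaranteed):

package main

import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/kube" // assumed package path
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	clients := kube.GetClients()

	// The Kubernetes client is always usable.
	pods, err := clients.KubernetesClient.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		logrus.Fatalf("listing pods failed: %v", err)
	}
	logrus.Infof("found %d pods", len(pods.Items))

	// The OpenShift apps client is only populated on OpenShift.
	if kube.IsOpenshift {
		dcs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs("default").List(metav1.ListOptions{})
		if err != nil {
			logrus.Fatalf("listing deploymentconfigs failed: %v", err)
		}
		logrus.Infof("found %d deploymentconfigs", len(dcs.Items))
	}
}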
func isOpenshift() bool {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true
}
logrus.Info("Environment: Kubernetes")
return false
}

// GetOpenshiftAppsClient returns an Openshift Client that can query on Apps
func GetOpenshiftAppsClient() (*appsclient.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return appsclient.NewForConfig(config)
}

// GetKubernetesClient gets the client for k8s, if ~/.kube/config exists so get that config else incluster config
func GetKubernetesClient() (*kubernetes.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}

func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
@@ -31,5 +98,6 @@ func GetClient() (*kubernetes.Clientset, error) {
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)

return config, nil
}

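getConfig picks up an explicit kubeconfig from the KUBECONFIG environment variable and otherwise falls back to in-cluster configuration or ~/.kube/config; the exact fallback order is only partly visible in this hunk, so treat it as an assumption. A minimal sketch of driving that selection from the outside:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/kube" // assumed package path
)

func main() {
	// With KUBECONFIG set, getConfig builds the rest.Config from that file;
	// unset it to exercise the in-cluster / ~/.kube/config fallback instead.
	os.Setenv("KUBECONFIG", "/tmp/test-kubeconfig")

	client, err := kube.GetKubernetesClient()
	if err != nil {
		logrus.Fatalf("could not build Kubernetes client: %v", err)
	}
	logrus.Infof("client ready: %T", client)
}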
@@ -1,7 +1,7 @@
package kube

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)