Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)
Compare commits
46 Commits
| SHA1 |
|---|
| 64d12a7c31 |
| 078fc034d2 |
| a3125e876c |
| 2f56d5c05b |
| 003eaee887 |
| 3030ddebf1 |
| aa8b415fd4 |
| d5c66bc235 |
| 9bc62e1f4e |
| 1bd5fb5620 |
| 4b233872bb |
| 569c0469fe |
| 599ceafd6b |
| 30ca4b463a |
| e4756034ea |
| 2cc4941ffe |
| 53ca2ef340 |
| 9d38927e4a |
| c670fb7e65 |
| 0427d58a4f |
| 1ad7a8fa76 |
| 1f1862a0f5 |
| 0b0679bbd8 |
| dd2b3de46d |
| 0454d18510 |
| b99994e997 |
| c8c0f98c1d |
| e11e1744cd |
| 2cac0cc713 |
| 5b467b731c |
| 61a2af1782 |
| c7d4a0aa9d |
| effc40cab1 |
| 034d2dcd93 |
| 81c7b3ef25 |
| 5befb4d6eb |
| 97b7286c2b |
| 17b6d58300 |
| f89f59c5b2 |
| a317555db9 |
| a3f8f30a6f |
| f05d4ed0eb |
| 6bc9072418 |
| d9379d18f2 |
| d2ae8ce4cb |
| 6a5341aebf |
1 .gitignore (vendored)
@@ -6,4 +6,5 @@ release
out/
_gopath/
.DS_Store
.vscode
vendor
2 Jenkinsfile (vendored)
@@ -1,5 +1,5 @@
#!/usr/bin/groovy
@Library('github.com/stakater/fabric8-pipeline-library@v2.4.0')
@Library('github.com/stakater/fabric8-pipeline-library@v2.5.3')

def dummy
2 Makefile
@@ -32,7 +32,7 @@ binary-image: builder-image
    @docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -

test:
    "$(GOCMD)" test -v ./...
    "$(GOCMD)" test -timeout 1800s -v ./...

stop:
    @docker stop "${BINARY}"
47 README.md
@@ -1,16 +1,12 @@
# RELOADER

## WHY NAME RELOADER

In the English language, a reloader is a thing/tool that can reload certain stuff. Referring to that meaning, Reloader can reload

## Problem

We would like to watch if some change happens in `ConfigMap` and `Secret` objects and then perform certain upgrade on relevant `Deployment`, `Daemonset` and `Statefulset`
We would like to watch if some change happens in `ConfigMap` and `Secret` objects and then perform rolling upgrade on relevant `Deployment`, `Daemonset` and `Statefulset`

## Solution

Reloader can watch any changes in `ConfigMap` and `Secret` objects and update or recreate Pods for their associated `Deployments`, `Daemonsets` and `Statefulsets`. In this way Pods can get the latest changes in `ConfigMap` or `Secret` objects.
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `Deployments`, `Daemonsets` and `Statefulsets`.

**NOTE:** This controller has been inspired from [configmapController](https://github.com/fabric8io/configmapcontroller)

@@ -21,31 +17,46 @@ For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this a
```yaml
metadata:
  annotations:
    reloader.stakater.com/update-on-change: "foo"
    configmap.reloader.stakater.com/reload: "foo"
```

Then, providing `Reloader` is running, whenever you edit the `ConfigMap` called `foo` the Reloader will update the `Deployment` by adding the environment variable:
OR

For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation to your `Deployment`

```yaml
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo"
```
STAKATER_FOO_REVISION=${reloaderRevision}
```

This then triggers a rolling upgrade of your deployment's pods to use the new configuration.

The same procedure can be followed to perform a rolling upgrade on `Daemonsets` and `Statefulsets` as well.

## Deploying to Kubernetes

You can deploy Reloader by running the following kubectl commands:
You can deploy Reloader by the following methods:

### Vanilla Manifests

You can apply vanilla manifests by running the following command

```bash
kubectl apply -f rbac.yaml -n <namespace>
kubectl apply -f deployment.yaml -n <namespace>
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```

### Helm Charts

Or alternatively if you configured `helm` on your cluster, you can deploy Reloader via helm chart located under `deployments/kubernetes/chart/reloader` folder.
Alternatively, if you have configured helm on your cluster, you can add Reloader from our public chart repository and deploy it via helm using the commands below

```bash
helm repo add stakater https://stakater.github.io/stakater-charts

helm repo update

helm install stakater/reloader
```

## Monitor All namespaces

By default Reloader gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.

## Help
@@ -3,7 +3,7 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 0.0.1
version: 0.0.8
keywords:
- Reloader
- kubernetes
@@ -18,10 +18,12 @@ spec:
    spec:
      containers:
      - env:
        {{- if eq .Values.reloader.watchGlobally false }}
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- end }}
        image: "{{ .Values.reloader.image.name }}:{{ .Values.reloader.image.tag }}"
        imagePullPolicy: {{ .Values.reloader.image.pullPolicy }}
        name: {{ template "reloader-name" . }}
@@ -6,6 +6,56 @@ metadata:
{{ include "reloader-labels.chart" . | indent 4 }}
  name: {{ template "reloader-name" . }}
---
{{- if .Values.reloader.watchGlobally }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
{{ include "reloader-labels.stakater" . | indent 4 }}
{{ include "reloader-labels.chart" . | indent 4 }}
  name: {{ template "reloader-name" . }}-role
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
      - configmaps
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - ""
      - "extensions"
      - "apps"
    resources:
      - deployments
      - daemonsets
      - statefulsets
    verbs:
      - list
      - get
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
{{ include "reloader-labels.stakater" . | indent 4 }}
{{ include "reloader-labels.chart" . | indent 4 }}
  name: {{ template "reloader-name" . }}-role-binding
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "reloader-name" . }}-role
subjects:
  - kind: ServiceAccount
    name: {{ template "reloader-name" . }}
    namespace: {{ .Release.Namespace }}
{{- else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
@@ -24,6 +74,19 @@ rules:
      - list
      - get
      - watch
  - apiGroups:
      - ""
      - "extensions"
      - "apps"
    resources:
      - deployments
      - daemonsets
      - statefulsets
    verbs:
      - list
      - get
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
@@ -40,4 +103,5 @@ roleRef:
subjects:
  - kind: ServiceAccount
    name: {{ template "reloader-name" . }}
    namespace: {{ .Release.Namespace }}
    namespace: {{ .Release.Namespace }}
{{- end }}
@@ -7,8 +7,9 @@ reloader:
  labels:
    provider: stakater
    group: com.stakater.platform
    version: 0.0.1
    version: 0.0.8
  image:
    name: stakater/reloader
    tag: "0.0.1"
    pullPolicy: IfNotPresent
    tag: "0.0.8"
    pullPolicy: IfNotPresent
  watchGlobally: true
@@ -7,8 +7,8 @@ metadata:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.1
    chart: "reloader-0.0.1"
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader
@@ -29,11 +29,7 @@ spec:
    spec:
      containers:
      - env:
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: "stakater/reloader:0.0.1"
        image: "stakater/reloader:0.0.8"
        imagePullPolicy: IfNotPresent
        name: reloader
      serviceAccountName: reloader
@@ -7,25 +7,25 @@ metadata:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.1
    chart: "reloader-0.0.1"
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
kind: ClusterRole
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.1
    chart: "reloader-0.0.1"
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader-role
  namespace: tools
  namespace: default
rules:
  - apiGroups:
      - ""
@@ -36,25 +36,38 @@ rules:
      - list
      - get
      - watch
  - apiGroups:
      - ""
      - "extensions"
      - "apps"
    resources:
      - deployments
      - daemonsets
      - statefulsets
    verbs:
      - list
      - get
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.1
    chart: "reloader-0.0.1"
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader-role-binding
  namespace: tools
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  kind: ClusterRole
  name: reloader-role
subjects:
  - kind: ServiceAccount
    name: reloader
    namespace: tools
    namespace: default
109 deployments/kubernetes/reloader.yaml (Normal file)
@@ -0,0 +1,109 @@
---
# Source: reloader/templates/deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: reloader
      group: com.stakater.platform
      provider: stakater
  template:
    metadata:
      labels:
        app: reloader
        group: com.stakater.platform
        provider: stakater
    spec:
      containers:
      - env:
        image: "stakater/reloader:0.0.8"
        imagePullPolicy: IfNotPresent
        name: reloader
      serviceAccountName: reloader

---
# Source: reloader/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader-role
  namespace: default
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
      - configmaps
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - ""
      - "extensions"
      - "apps"
    resources:
      - deployments
      - daemonsets
      - statefulsets
    verbs:
      - list
      - get
      - update
      - patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    app: reloader
    group: com.stakater.platform
    provider: stakater
    version: 0.0.8
    chart: "reloader-0.0.8"
    release: "RELEASE-NAME"
    heritage: "Tiller"
  name: reloader-role-binding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: reloader-role
subjects:
  - kind: ServiceAccount
    name: reloader
    namespace: default
@@ -11,4 +11,5 @@ reloader:
  image:
    name: {{ getenv "DOCKER_IMAGE" }}
    tag: "{{ getenv "VERSION" }}"
    pullPolicy: IfNotPresent
    pullPolicy: IfNotPresent
  watchGlobally: true
77 docs/How-it-works.md (Normal file)
@@ -0,0 +1,77 @@
# How it works?

Reloader watches `ConfigMap` and `Secret` objects and detects if there are changes in the data of these objects. After change detection, Reloader performs a rolling upgrade on the relevant Pods via the associated `Deployment`, `Daemonset` and `Statefulset`.

## How change detection works

Reloader watches changes in the data of `configmaps` and `secrets`. As soon as it detects a change, it forwards these objects to an update handler which decides if and how to perform the rolling upgrade.
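The hand-off to the update handler can be pictured with a minimal sketch. It mirrors the `ResourceUpdatedHandler` introduced in this change set (which keeps both the old and the new object), but the wiring in `main` and the literal ConfigMap values are illustrative assumptions, not code from the repository.

```go
package main

import (
	"fmt"
	"reflect"

	"k8s.io/api/core/v1"
)

// updatedHandler keeps the object before and after the change,
// in the same spirit as ResourceUpdatedHandler in this change set.
type updatedHandler struct {
	Resource    interface{}
	OldResource interface{}
}

// handle decides whether the data actually changed; only then would a
// rolling upgrade be considered.
func (h updatedHandler) handle() {
	oldCM, okOld := h.OldResource.(*v1.ConfigMap)
	newCM, okNew := h.Resource.(*v1.ConfigMap)
	if !okOld || !okNew {
		fmt.Println("not a configmap; this sketch only handles configmaps")
		return
	}
	if reflect.DeepEqual(oldCM.Data, newCM.Data) {
		fmt.Println("no data change detected, skipping rolling upgrade")
		return
	}
	fmt.Println("data change detected, a rolling upgrade would be triggered")
}

func main() {
	oldCM := &v1.ConfigMap{Data: map[string]string{"url": "www.google.com"}}
	newCM := &v1.ConfigMap{Data: map[string]string{"url": "www.stakater.com"}}
	updatedHandler{Resource: newCM, OldResource: oldCM}.handle()
}
```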
## Requirements for rolling upgrade

To perform a rolling upgrade, a `deployment`, `daemonset` or `statefulset` must have

- support for the rolling upgrade strategy
- the specific annotation for `configmaps` or `secrets`

The annotation value is a comma separated list of `configmaps` or `secrets`. If a change is detected in the data of these `configmaps` or `secrets`, Reloader will perform rolling upgrades on their associated `deployments`, `daemonsets` or `statefulsets`.

### Annotation for Configmap

For a `Deployment` called `foo` that has a `ConfigMap` called `foo`, add this annotation to your `Deployment`

```yaml
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo"
```

### Annotation for Secret

For a `Deployment` called `foo` that has a `Secret` called `foo`, add this annotation to your `Deployment`

```yaml
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo"
```

The above mentioned annotations also work for `Daemonsets` and `Statefulsets`; how the comma separated value is matched is sketched below.
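As a minimal sketch of that matching, the snippet below splits the annotation value on commas and looks for the name of the changed resource. The annotation key comes from the constants added in this change set; the helper itself, the whitespace trimming and the literal values are illustrative assumptions rather than the repository's exact implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// ConfigmapUpdateOnChangeAnnotation mirrors the constant added in this change set.
const ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"

// annotationMatches reports whether changedName appears in the
// comma separated annotation value of a deployment, daemonset or statefulset.
func annotationMatches(annotations map[string]string, changedName string) bool {
	value := annotations[ConfigmapUpdateOnChangeAnnotation]
	if value == "" {
		return false
	}
	for _, name := range strings.Split(value, ",") {
		if strings.TrimSpace(name) == changedName {
			return true
		}
	}
	return false
}

func main() {
	annotations := map[string]string{ConfigmapUpdateOnChangeAnnotation: "foo,bar"}
	fmt.Println(annotationMatches(annotations, "foo")) // true
	fmt.Println(annotationMatches(annotations, "baz")) // false
}
```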
## How Rolling upgrade works?

When Reloader detects a change in a configmap, it gets two configmap objects. The first is the old configmap object, which holds the state before the latest change. The second is the new configmap object, which contains the latest changes. Reloader compares both objects to see whether any change in data occurred. Only if it finds a change in the new configmap object does it move forward with the rolling upgrade.

After that, Reloader gets the list of all deployments, daemonsets and statefulsets and looks for the above mentioned annotation for the configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.

### Environment variable for Configmap

If the configmap name is foo then

```yaml
STAKATER_FOO_CONFIGMAP
```

### Environment variable for Secret

If the Secret name is foo then

```yaml
STAKATER_FOO_SECRET
```

If the environment variable is found, Reloader gets its value and compares it with the new configmap hash value. If the old value in the environment variable is different from the new hash value, Reloader updates the environment variable. If the environment variable does not exist, it creates a new environment variable with the latest hash value from the configmap and updates the relevant `deployment`, `daemonset` or `statefulset`.

Note: Rolling upgrade also works in the same way for secrets.
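A minimal sketch of this environment variable handling is shown below. The `STAKATER_` prefix and the `_CONFIGMAP` postfix come from the constants added in this change set; the dash-to-underscore conversion, the container handling and the sample hash are simplified assumptions rather than the exact repository code.

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/api/core/v1"
)

// Prefix and postfix mirror the constants added in this change set.
const (
	envVarPrefix           = "STAKATER_"
	configmapEnvVarPostfix = "_CONFIGMAP"
)

// envVarName builds the per-resource variable name, e.g. "foo" -> STAKATER_FOO_CONFIGMAP.
// The dash-to-underscore handling here is an assumption for this sketch.
func envVarName(resourceName string) string {
	converted := strings.ToUpper(strings.Replace(resourceName, "-", "_", -1))
	return envVarPrefix + converted + configmapEnvVarPostfix
}

// setHash updates the variable if it exists with a different value, or appends
// it if it is missing; returning true means the pod template changed and a
// rolling upgrade would follow.
func setHash(container *v1.Container, name, hash string) bool {
	for i := range container.Env {
		if container.Env[i].Name == name {
			if container.Env[i].Value == hash {
				return false // no actual change
			}
			container.Env[i].Value = hash
			return true
		}
	}
	container.Env = append(container.Env, v1.EnvVar{Name: name, Value: hash})
	return true
}

func main() {
	c := v1.Container{Name: "app"}
	name := envVarName("foo")
	fmt.Println(name, setHash(&c, name, "abd4ed82fb04548388a6cf3c339fd9dc84d275df"))
}
```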
### Hash value Computation

Reloader uses SHA1 to compute the hash value. SHA1 is used because it is efficient and less prone to collision.
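The snippet below is a small sketch of that computation, following the same approach as the hashing code added in this change set: the configmap data is flattened into sorted "key=value" pairs, joined with ";", and hashed with SHA1. The sample data is illustrative.

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"
)

// configMapHash flattens the data into sorted key=value pairs and returns the
// hex-encoded SHA1 of the joined string, as the handler in this change set
// does before comparing old and new values.
func configMapHash(data map[string]string) string {
	values := []string{}
	for k, v := range data {
		values = append(values, k+"="+v)
	}
	sort.Strings(values)
	sum := sha1.Sum([]byte(strings.Join(values, ";")))
	return fmt.Sprintf("%x", sum)
}

func main() {
	oldData := map[string]string{"url": "www.google.com"}
	newData := map[string]string{"url": "www.stakater.com"}
	fmt.Println(configMapHash(oldData) != configMapHash(newData)) // true: the data changed
}
```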
## Monitor All Namespaces

By default Reloader deploys in the default namespace and monitors changes in all namespaces. To monitor changes in a specific namespace, deploy Reloader in that namespace and set the `watchGlobally` flag to `false` in the values file located under `deployments/kubernetes/chart/reloader`, then render the manifest file using the helm command

```bash
helm --namespace {replace this with namespace name} template . > reloader.yaml
```

The output file can then be used to deploy Reloader in a specific namespace.
10 docs/Reloader-vs-ConfigmapController.md (Normal file)
@@ -0,0 +1,10 @@
# Reloader vs ConfigmapController

Reloader is inspired from [configmapcontroller](https://github.com/fabric8io/configmapcontroller), but there are many ways in which it differs from configmapcontroller. Below is a small comparison between these two controllers.

| ConfigmapController | Reloader |
|---|---|
| ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. | Reloader on the other hand can watch and detect changes in both `secrets` and `configmaps`. |
| ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets`. | Reloader on the other hand can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets`. |
| Currently there are no unit test cases or end to end integration test cases in configmap controller. This adds difficulties for any additional updates in configmap controller, and one cannot know for sure whether new changes break any older functionality. | Reloader provides both unit test cases and end to end integration test cases for future updates, so one can make sure that new changes do not break any older functionality. |
| Configmap controller uses a `FABRIC8_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it into a suitable hash value to avoid data pollution in the deployment. | Reloader uses SHA1 to encode the change in the configmap or secret. It then saves the SHA1 value in a `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40-character encoded value that is also less prone to collision. |
28 docs/Reloader-vs-k8s-trigger-controller.md (Normal file)
@@ -0,0 +1,28 @@
# Reloader vs k8s-trigger-controller

Reloader and k8s-trigger-controller are both built for the same purpose, so there are quite a few similarities and differences between them.

## Similarities

- Both controllers support change detection in configmaps and secrets
- Both controllers support deployment rollout
- Both controllers use SHA1 for hashing
- Both controllers have end to end as well as unit test cases.

## Differences

### Support for Daemonsets and Statefulsets

#### k8s-trigger-controller:

k8s-trigger-controller only supports deployment rollout. It does not support daemonset and statefulset rollout.

#### Reloader:

Reloader supports deployment rollout as well as daemonset and statefulset rollout.

### Hashing usage

#### k8s-trigger-controller:

k8s-trigger-controller stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`

#### Reloader:

Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`
51 docs/Verify-Reloader-Working.md (Normal file)
@@ -0,0 +1,51 @@
# Verify Reloader's Working

Reloader's working can be verified in two ways.

## Verify from logs

Check the logs of Reloader and verify that you can see logs like the ones below. If you are able to find these logs, Reloader is working.

```text
Changes Detected in test-object of type 'SECRET' in namespace: test-reloader

Updated test-resource of type Deployment in namespace: test-reloader
```

Below are the details that explain these logs:

### test-object

`test-object` is the name of a `secret` or a `configmap` in which a change has been detected.

### SECRET

`SECRET` is the type of `test-object`. It can either be `SECRET` or `CONFIGMAP`

### test-reloader

`test-reloader` is the name of the namespace in which Reloader has detected the change.

### test-resource

`test-resource` is the name of the resource which is going to be updated

### Deployment

`Deployment` is the type of `test-resource`. It can either be a `Deployment`, `Daemonset` or `Statefulset`

## Verify by checking the age of Pod

A pod's age can tell whether Reloader is working correctly or not. If you know that a change in a `secret` or `configmap` has occurred, then check the relevant Pod's age immediately. It should have been newly created a few moments ago.

### Verify from kubernetes Dashboard

The `kubernetes dashboard` can be used to verify the working of Reloader. After a change in a `secret` or `configmap`, check the relevant Pod's age from the dashboard. It should have been newly created a few moments ago.

### Verify from command line

After a change in a `secret` or `configmap`, run the below mentioned command and verify that the pod is newly created.

```bash
kubectl get pods <pod name> -n <namespace name>
```
32 glide.lock (generated)
@@ -1,5 +1,5 @@
hash: b6fe060028bdb1249ba2413746476c2550b267eeab3c166c36a86e000a8dd354
updated: 2018-07-17T09:08:20.493153674Z
updated: 2018-07-24T21:12:43.027181463+05:00
imports:
- name: github.com/davecgh/go-spew
  version: 782f4967f2dc4564575ca782fe2d04090b5faca8
@@ -90,13 +90,10 @@ imports:
  version: 1c05540f6879653db88113bc4a2b70aec4bd491f
  subpackages:
  - context
  - html
  - html/atom
  - http2
  - http2/hpack
  - idna
  - lex/httplex
  - websocket
- name: golang.org/x/sys
  version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
  subpackages:
@@ -194,35 +191,62 @@ imports:
  version: 35874c597fed17ca62cd197e516d7d5ff9a2958c
  subpackages:
  - discovery
  - discovery/fake
  - kubernetes
  - kubernetes/fake
  - kubernetes/scheme
  - kubernetes/typed/admissionregistration/v1alpha1
  - kubernetes/typed/admissionregistration/v1alpha1/fake
  - kubernetes/typed/apps/v1beta1
  - kubernetes/typed/apps/v1beta1/fake
  - kubernetes/typed/apps/v1beta2
  - kubernetes/typed/apps/v1beta2/fake
  - kubernetes/typed/authentication/v1
  - kubernetes/typed/authentication/v1/fake
  - kubernetes/typed/authentication/v1beta1
  - kubernetes/typed/authentication/v1beta1/fake
  - kubernetes/typed/authorization/v1
  - kubernetes/typed/authorization/v1/fake
  - kubernetes/typed/authorization/v1beta1
  - kubernetes/typed/authorization/v1beta1/fake
  - kubernetes/typed/autoscaling/v1
  - kubernetes/typed/autoscaling/v1/fake
  - kubernetes/typed/autoscaling/v2beta1
  - kubernetes/typed/autoscaling/v2beta1/fake
  - kubernetes/typed/batch/v1
  - kubernetes/typed/batch/v1/fake
  - kubernetes/typed/batch/v1beta1
  - kubernetes/typed/batch/v1beta1/fake
  - kubernetes/typed/batch/v2alpha1
  - kubernetes/typed/batch/v2alpha1/fake
  - kubernetes/typed/certificates/v1beta1
  - kubernetes/typed/certificates/v1beta1/fake
  - kubernetes/typed/core/v1
  - kubernetes/typed/core/v1/fake
  - kubernetes/typed/extensions/v1beta1
  - kubernetes/typed/extensions/v1beta1/fake
  - kubernetes/typed/networking/v1
  - kubernetes/typed/networking/v1/fake
  - kubernetes/typed/policy/v1beta1
  - kubernetes/typed/policy/v1beta1/fake
  - kubernetes/typed/rbac/v1
  - kubernetes/typed/rbac/v1/fake
  - kubernetes/typed/rbac/v1alpha1
  - kubernetes/typed/rbac/v1alpha1/fake
  - kubernetes/typed/rbac/v1beta1
  - kubernetes/typed/rbac/v1beta1/fake
  - kubernetes/typed/scheduling/v1alpha1
  - kubernetes/typed/scheduling/v1alpha1/fake
  - kubernetes/typed/settings/v1alpha1
  - kubernetes/typed/settings/v1alpha1/fake
  - kubernetes/typed/storage/v1
  - kubernetes/typed/storage/v1/fake
  - kubernetes/typed/storage/v1beta1
  - kubernetes/typed/storage/v1beta1/fake
  - pkg/version
  - rest
  - rest/watch
  - testing
  - tools/auth
  - tools/cache
  - tools/clientcmd
91 internal/pkg/callbacks/rolling_upgrade.go (Normal file)
@@ -0,0 +1,91 @@
package callbacks

import (
    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/util"
    apps_v1beta1 "k8s.io/api/apps/v1beta1"
    "k8s.io/api/core/v1"
    "k8s.io/api/extensions/v1beta1"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kubernetes.Interface, string) []interface{}

//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container

//UpdateFunc performs the resource update
type UpdateFunc func(kubernetes.Interface, string, interface{}) error

//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
    ItemsFunc      ItemsFunc
    ContainersFunc ContainersFunc
    UpdateFunc     UpdateFunc
    ResourceType   string
}

// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(client kubernetes.Interface, namespace string) []interface{} {
    deployments, err := client.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list deployments %v", err)
    }
    return util.InterfaceSlice(deployments.Items)
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(client kubernetes.Interface, namespace string) []interface{} {
    daemonSets, err := client.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list daemonSets %v", err)
    }
    return util.InterfaceSlice(daemonSets.Items)
}

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(client kubernetes.Interface, namespace string) []interface{} {
    statefulSets, err := client.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list statefulSets %v", err)
    }
    return util.InterfaceSlice(statefulSets.Items)
}

// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
    return item.(v1beta1.Deployment).Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonset
func GetDaemonSetContainers(item interface{}) []v1.Container {
    return item.(v1beta1.DaemonSet).Spec.Template.Spec.Containers
}

// GetStatefulsetContainers returns the containers of given statefulSet
func GetStatefulsetContainers(item interface{}) []v1.Container {
    return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Containers
}

// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(client kubernetes.Interface, namespace string, resource interface{}) error {
    deployment := resource.(v1beta1.Deployment)
    _, err := client.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
    return err
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(client kubernetes.Interface, namespace string, resource interface{}) error {
    daemonSet := resource.(v1beta1.DaemonSet)
    _, err := client.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
    return err
}

// UpdateStatefulset performs rolling upgrade on statefulSet
func UpdateStatefulset(client kubernetes.Interface, namespace string, resource interface{}) error {
    statefulSet := resource.(apps_v1beta1.StatefulSet)
    _, err := client.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
    return err
}
8 internal/pkg/constants/annotations.go (Normal file)
@@ -0,0 +1,8 @@
package constants

const (
    // ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
    ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
    // SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
    SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
)
10 internal/pkg/constants/constants.go (Normal file)
@@ -0,0 +1,10 @@
package constants

const (
    // ConfigmapEnvVarPostfix is a postfix for configmap envVar
    ConfigmapEnvVarPostfix = "_CONFIGMAP"
    // SecretEnvVarPostfix is a postfix for secret envVar
    SecretEnvVarPostfix = "_SECRET"
    // EnvVarPrefix is a Prefix for environment variable
    EnvVarPrefix = "STAKATER_"
)
@@ -9,7 +9,6 @@ import (
    "github.com/stakater/Reloader/pkg/kube"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/util/runtime"
    errorHandler "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
@@ -65,15 +64,14 @@ func (c *Controller) Update(old interface{}, new interface{}) {

// Delete function to add an object to the queue in case of deleting a resource
func (c *Controller) Delete(old interface{}) {
    // TODO Added this function for future usecase
    logrus.Infof("Deleted resource has been detected but no further implementation found to take action")
    logrus.Infof("Resource deletion has been detected but no further implementation found to take action")
}

//Run function for controller which handles the queue
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {

    logrus.Infof("Starting Controller")
    defer errorHandler.HandleCrash()
    defer runtime.HandleCrash()

    // Let the workers stop when we are done
    defer c.queue.ShutDown()
@@ -82,7 +80,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {

    // Wait for all involved caches to be synced, before processing items from the queue is started
    if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
        errorHandler.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
        runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
        return
    }
File diff suppressed because it is too large
20 internal/pkg/crypto/sha.go (Normal file)
@@ -0,0 +1,20 @@
package crypto

import (
    "crypto/sha1"
    "fmt"
    "io"

    "github.com/sirupsen/logrus"
)

// GenerateSHA generates SHA from string
func GenerateSHA(data string) string {
    hasher := sha1.New()
    _, err := io.WriteString(hasher, data)
    if err != nil {
        logrus.Errorf("Unable to write data in hash writer %v", err)
    }
    sha := hasher.Sum(nil)
    return fmt.Sprintf("%x", sha)
}
15 internal/pkg/crypto/sha_test.go (Normal file)
@@ -0,0 +1,15 @@
package crypto

import (
    "testing"
)

// TestGenerateSHA generates the sha from given data and verifies whether it is correct or not
func TestGenerateSHA(t *testing.T) {
    data := "www.stakater.com"
    sha := "abd4ed82fb04548388a6cf3c339fd9dc84d275df"
    result := GenerateSHA(data)
    if result != sha {
        t.Errorf("Failed to generate SHA")
    }
}
@@ -18,11 +18,11 @@ func (r ResourceCreatedHandler) Handle() error {
        logrus.Infof("Detected changes in object %s", r.Resource)
        // process resource based on its type
        if _, ok := r.Resource.(*v1.ConfigMap); ok {
            logrus.Infof("Performing 'Added' action for resource of type 'configmap'")
            logrus.Infof("A 'configmap' has been 'Added' but no implementation found to take action")
        } else if _, ok := r.Resource.(*v1.Secret); ok {
            logrus.Infof("Performing 'Added' action for resource of type 'secret'")
            logrus.Infof("A 'secret' has been 'Added' but no implementation found to take action")
        } else {
            logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found %v", r.Resource)
            logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
        }
    }
    return nil
194 internal/pkg/handler/update.go (Normal file)
@@ -0,0 +1,194 @@
package handler

import (
    "sort"
    "strings"

    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/constants"
    "github.com/stakater/Reloader/internal/pkg/crypto"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/kube"
    "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

// ResourceUpdatedHandler contains updated objects
type ResourceUpdatedHandler struct {
    Resource    interface{}
    OldResource interface{}
}

// Handle processes the updated resource
func (r ResourceUpdatedHandler) Handle() error {
    if r.Resource == nil || r.OldResource == nil {
        logrus.Errorf("Error in Handler")
    } else {
        logrus.Infof("Detected changes in object %s", r.Resource)
        // process resource based on its type
        rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
            ItemsFunc:      callbacks.GetDeploymentItems,
            ContainersFunc: callbacks.GetDeploymentContainers,
            UpdateFunc:     callbacks.UpdateDeployment,
            ResourceType:   "Deployment",
        })
        rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
            ItemsFunc:      callbacks.GetDaemonSetItems,
            ContainersFunc: callbacks.GetDaemonSetContainers,
            UpdateFunc:     callbacks.UpdateDaemonSet,
            ResourceType:   "DaemonSet",
        })
        rollingUpgrade(r, callbacks.RollingUpgradeFuncs{
            ItemsFunc:      callbacks.GetStatefulSetItems,
            ContainersFunc: callbacks.GetStatefulsetContainers,
            UpdateFunc:     callbacks.UpdateStatefulset,
            ResourceType:   "StatefulSet",
        })
    }
    return nil
}

func rollingUpgrade(r ResourceUpdatedHandler, upgradeFuncs callbacks.RollingUpgradeFuncs) {
    client, err := kube.GetClient()
    if err != nil {
        logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
    }

    config, envVarPostfix, oldSHAData := getConfig(r)

    if config.SHAValue != oldSHAData {
        err = PerformRollingUpgrade(client, config, envVarPostfix, upgradeFuncs)
        if err != nil {
            logrus.Fatalf("Rolling upgrade failed with error = %v", err)
        }
    } else {
        logrus.Infof("Rolling upgrade will not happen because no actual change in data has been detected")
    }
}

func getConfig(r ResourceUpdatedHandler) (util.Config, string, string) {
    var oldSHAData, envVarPostfix string
    var config util.Config
    if _, ok := r.Resource.(*v1.ConfigMap); ok {
        logrus.Infof("Performing 'Updated' action for resource of type 'configmap'")
        oldSHAData = getSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
        config = getConfigmapConfig(r)
        envVarPostfix = constants.ConfigmapEnvVarPostfix
    } else if _, ok := r.Resource.(*v1.Secret); ok {
        logrus.Infof("Performing 'Updated' action for resource of type 'secret'")
        oldSHAData = getSHAfromSecret(r.OldResource.(*v1.Secret).Data)
        config = getSecretConfig(r)
        envVarPostfix = constants.SecretEnvVarPostfix
    } else {
        logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
    }
    return config, envVarPostfix, oldSHAData
}

func getConfigmapConfig(r ResourceUpdatedHandler) util.Config {
    configmap := r.Resource.(*v1.ConfigMap)
    return util.Config{
        Namespace:    configmap.Namespace,
        ResourceName: configmap.Name,
        Annotation:   constants.ConfigmapUpdateOnChangeAnnotation,
        SHAValue:     getSHAfromConfigmap(configmap.Data),
    }
}

func getSecretConfig(r ResourceUpdatedHandler) util.Config {
    secret := r.Resource.(*v1.Secret)
    return util.Config{
        Namespace:    secret.Namespace,
        ResourceName: secret.Name,
        Annotation:   constants.SecretUpdateOnChangeAnnotation,
        SHAValue:     getSHAfromSecret(secret.Data),
    }
}

// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
    items := upgradeFuncs.ItemsFunc(client, config.Namespace)
    var err error
    for _, i := range items {
        containers := upgradeFuncs.ContainersFunc(i)
        // find correct annotation and update the resource
        annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
        if annotationValue != "" {
            values := strings.Split(annotationValue, ",")
            for _, value := range values {
                if value == config.ResourceName {
                    updated := updateContainers(containers, value, config.SHAValue, envarPostfix)
                    if !updated {
                        logrus.Warnf("Rolling upgrade did not happen")
                    } else {
                        err = upgradeFuncs.UpdateFunc(client, config.Namespace, i)
                        if err != nil {
                            logrus.Errorf("Update %s failed %v", upgradeFuncs.ResourceType, err)
                        } else {
                            logrus.Infof("Updated %s of type %s", config.ResourceName, upgradeFuncs.ResourceType)
                        }
                        break
                    }
                }
            }
        }
    }
    return err
}

func updateContainers(containers []v1.Container, annotationValue string, shaData string, envarPostfix string) bool {
    updated := false
    envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + envarPostfix
    logrus.Infof("Generated environment variable: %s", envar)
    for i := range containers {
        envs := containers[i].Env

        //update if env var exists
        updated = updateEnvVar(envs, envar, shaData)

        // if no existing env var exists lets create one
        if !updated {
            e := v1.EnvVar{
                Name:  envar,
                Value: shaData,
            }
            containers[i].Env = append(containers[i].Env, e)
            updated = true
            logrus.Infof("%s environment variable does not exist, creating a new envVar", envar)
        }
    }
    return updated
}

func updateEnvVar(envs []v1.EnvVar, envar string, shaData string) bool {
    for j := range envs {
        if envs[j].Name == envar {
            logrus.Infof("%s environment variable found", envar)
            if envs[j].Value != shaData {
                logrus.Infof("Updating %s", envar)
                envs[j].Value = shaData
                return true
            }
        }
    }
    return false
}

func getSHAfromConfigmap(data map[string]string) string {
    values := []string{}
    for k, v := range data {
        values = append(values, k+"="+v)
    }
    sort.Strings(values)
    return crypto.GenerateSHA(strings.Join(values, ";"))
}

func getSHAfromSecret(data map[string][]byte) string {
    values := []string{}
    for k, v := range data {
        values = append(values, k+"="+string(v[:]))
    }
    sort.Strings(values)
    return crypto.GenerateSHA(strings.Join(values, ";"))
}
315 internal/pkg/handler/update_test.go (Normal file)
@@ -0,0 +1,315 @@
package handler

import (
    "os"
    "testing"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/constants"
    "github.com/stakater/Reloader/internal/pkg/testutil"
    "github.com/stakater/Reloader/internal/pkg/util"
    testclient "k8s.io/client-go/kubernetes/fake"
)

var (
    client        = testclient.NewSimpleClientset()
    namespace     = "test-handler-" + testutil.RandSeq(5)
    configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
    secretName    = "testsecret-handler-" + testutil.RandSeq(5)
)

func TestMain(m *testing.M) {

    logrus.Infof("Creating namespace %s", namespace)
    testutil.CreateNamespace(namespace, client)

    logrus.Infof("Setting up the test resources")
    setup()

    logrus.Infof("Running Testcases")
    retCode := m.Run()

    logrus.Infof("tearing down the test resources")
    teardown()

    os.Exit(retCode)
}

func setup() {
    // Creating configmap
    _, err := testutil.CreateConfigMap(client, namespace, configmapName, "www.google.com")
    if err != nil {
        logrus.Errorf("Error in configmap creation: %v", err)
    }

    // Creating secret
    data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
    _, err = testutil.CreateSecret(client, namespace, secretName, data)
    if err != nil {
        logrus.Errorf("Error in secret creation: %v", err)
    }

    // Creating Deployment with configmap
    _, err = testutil.CreateDeployment(client, configmapName, namespace)
    if err != nil {
        logrus.Errorf("Error in Deployment with configmap creation: %v", err)
    }

    // Creating Deployment with secret
    _, err = testutil.CreateDeployment(client, secretName, namespace)
    if err != nil {
        logrus.Errorf("Error in Deployment with secret creation: %v", err)
    }

    // Creating DaemonSet with configmap
    _, err = testutil.CreateDaemonSet(client, configmapName, namespace)
    if err != nil {
        logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
    }

    // Creating DaemonSet with secret
    _, err = testutil.CreateDaemonSet(client, secretName, namespace)
    if err != nil {
        logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
    }

    // Creating StatefulSet with configmap
    _, err = testutil.CreateStatefulSet(client, configmapName, namespace)
    if err != nil {
        logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
    }

    // Creating StatefulSet with secret
    _, err = testutil.CreateStatefulSet(client, secretName, namespace)
    if err != nil {
        logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
    }

}

func teardown() {
    // Deleting Deployment with configmap
    deploymentError := testutil.DeleteDeployment(client, namespace, configmapName)
    if deploymentError != nil {
        logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
    }

    // Deleting Deployment with secret
    deploymentError = testutil.DeleteDeployment(client, namespace, secretName)
    if deploymentError != nil {
        logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
    }

    // Deleting DaemonSet with configmap
    daemonSetError := testutil.DeleteDaemonSet(client, namespace, configmapName)
    if daemonSetError != nil {
        logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
    }

    // Deleting DaemonSet with secret
    daemonSetError = testutil.DeleteDaemonSet(client, namespace, secretName)
    if daemonSetError != nil {
        logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
    }

    // Deleting StatefulSet with configmap
    statefulSetError := testutil.DeleteStatefulSet(client, namespace, configmapName)
    if statefulSetError != nil {
        logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
    }

    // Deleting StatefulSet with secret
    statefulSetError = testutil.DeleteStatefulSet(client, namespace, secretName)
    if statefulSetError != nil {
        logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
    }

    // Deleting Configmap
    err := testutil.DeleteConfigMap(client, namespace, configmapName)
    if err != nil {
        logrus.Errorf("Error while deleting the configmap %v", err)
    }

    // Deleting Secret
    err = testutil.DeleteSecret(client, namespace, secretName)
    if err != nil {
        logrus.Errorf("Error while deleting the secret %v", err)
    }

    // Deleting namespace
    testutil.DeleteNamespace(namespace, client)

}

func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, configmapName, "www.stakater.com")
    config := util.Config{
        Namespace:    namespace,
        ResourceName: configmapName,
        SHAValue:     shaData,
        Annotation:   constants.ConfigmapUpdateOnChangeAnnotation,
    }
    deploymentFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetDeploymentItems,
        ContainersFunc: callbacks.GetDeploymentContainers,
        UpdateFunc:     callbacks.UpdateDeployment,
        ResourceType:   "Deployment",
    }

    err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for Deployment with Configmap")
    }

    logrus.Infof("Verifying deployment update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
    if !updated {
        t.Errorf("Deployment was not updated")
    }
}

func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
    config := util.Config{
        Namespace:    namespace,
        ResourceName: secretName,
        SHAValue:     shaData,
        Annotation:   constants.SecretUpdateOnChangeAnnotation,
    }
    deploymentFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetDeploymentItems,
        ContainersFunc: callbacks.GetDeploymentContainers,
        UpdateFunc:     callbacks.UpdateDeployment,
        ResourceType:   "Deployment",
    }

    err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for Deployment with Secret")
    }

    logrus.Infof("Verifying deployment update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
    if !updated {
        t.Errorf("Deployment was not updated")
    }
}

func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
    config := util.Config{
        Namespace:    namespace,
        ResourceName: configmapName,
        SHAValue:     shaData,
        Annotation:   constants.ConfigmapUpdateOnChangeAnnotation,
    }
    daemonSetFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetDaemonSetItems,
        ContainersFunc: callbacks.GetDaemonSetContainers,
        UpdateFunc:     callbacks.UpdateDaemonSet,
        ResourceType:   "DaemonSet",
    }

    err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
    }

    logrus.Infof("Verifying daemonSet update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
    if !updated {
        t.Errorf("DaemonSet was not updated")
    }
}

func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")

    config := util.Config{
        Namespace:    namespace,
        ResourceName: secretName,
        SHAValue:     shaData,
        Annotation:   constants.SecretUpdateOnChangeAnnotation,
    }
    daemonSetFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetDaemonSetItems,
        ContainersFunc: callbacks.GetDaemonSetContainers,
        UpdateFunc:     callbacks.UpdateDaemonSet,
        ResourceType:   "DaemonSet",
    }

    err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for DaemonSet with secret")
    }

    logrus.Infof("Verifying daemonSet update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
    if !updated {
        t.Errorf("DaemonSet was not updated")
    }
}

func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")

    config := util.Config{
        Namespace:    namespace,
        ResourceName: configmapName,
        SHAValue:     shaData,
        Annotation:   constants.ConfigmapUpdateOnChangeAnnotation,
    }
    statefulSetFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetStatefulSetItems,
        ContainersFunc: callbacks.GetStatefulsetContainers,
        UpdateFunc:     callbacks.UpdateStatefulset,
        ResourceType:   "StatefulSet",
    }

    err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
    }

    logrus.Infof("Verifying statefulSet update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
    if !updated {
        t.Errorf("StatefulSet was not updated")
    }
}

func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
    shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")

    config := util.Config{
        Namespace:    namespace,
        ResourceName: secretName,
        SHAValue:     shaData,
        Annotation:   constants.SecretUpdateOnChangeAnnotation,
    }
    statefulSetFuncs := callbacks.RollingUpgradeFuncs{
        ItemsFunc:      callbacks.GetStatefulSetItems,
        ContainersFunc: callbacks.GetStatefulsetContainers,
        UpdateFunc:     callbacks.UpdateStatefulset,
        ResourceType:   "StatefulSet",
    }

    err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
    time.Sleep(5 * time.Second)
    if err != nil {
        t.Errorf("Rolling upgrade failed for StatefulSet with secret")
    }

    logrus.Infof("Verifying statefulSet update")
    updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
    if !updated {
        t.Errorf("StatefulSet was not updated")
    }
}
@@ -1,30 +0,0 @@
package handler

import (
	"github.com/sirupsen/logrus"
	"k8s.io/api/core/v1"
)

// ResourceUpdatedHandler contains updated objects
type ResourceUpdatedHandler struct {
	Resource    interface{}
	OldResource interface{}
}

// Handle processes the updated resource
func (r ResourceUpdatedHandler) Handle() error {
	if r.Resource == nil || r.OldResource == nil {
		logrus.Errorf("Error in Handler")
	} else {
		logrus.Infof("Detected changes in object %s", r.Resource)
		// process resource based on its type
		if _, ok := r.Resource.(*v1.ConfigMap); ok {
			logrus.Infof("Performing 'Updated' action for resource of type 'configmap'")
		} else if _, ok := r.Resource.(*v1.Secret); ok {
			logrus.Infof("Performing 'Updated' action for resource of type 'secret'")
		} else {
			logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found %v", r.Resource)
		}
	}
	return nil
}
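The removed handler above dispatched on the concrete resource type with a chain of type assertions; the same logic is often written as a type switch. A minimal sketch only, not part of this change:

```go
package handler

import (
	"github.com/sirupsen/logrus"
	"k8s.io/api/core/v1"
)

// handleUpdated is a hypothetical rewrite of the dispatch above as a type switch.
func handleUpdated(resource interface{}) {
	switch resource.(type) {
	case *v1.ConfigMap:
		logrus.Infof("Performing 'Updated' action for resource of type 'configmap'")
	case *v1.Secret:
		logrus.Infof("Performing 'Updated' action for resource of type 'secret'")
	default:
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found %v", resource)
	}
}
```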
404 internal/pkg/testutil/kube.go Normal file
@@ -0,0 +1,404 @@
package testutil

import (
	"math/rand"
	"sort"
	"strings"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1_beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

var (
	letters = []rune("abcdefghijklmnopqrstuvwxyz")
	// ConfigmapResourceType is a resource type which controller watches for changes
	ConfigmapResourceType = "configMaps"
	// SecretResourceType is a resource type which controller watches for changes
	SecretResourceType = "secrets"
)

// GetClient creates a Kubernetes clientset for the tests and fails fast if it cannot
func GetClient() *kubernetes.Clientset {
	newClient, err := kube.GetClient()
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
	return newClient
}

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
	if err != nil {
		logrus.Fatalf("Failed to create namespace for testing, error = %v", err)
	} else {
		logrus.Infof("Creating namespace for testing = %s", namespace)
	}
}

// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
	if err != nil {
		logrus.Fatalf("Failed to delete namespace that was created for testing, error = %v", err)
	} else {
		logrus.Infof("Deleting namespace for testing = %s", namespace)
	}
}
// GetDeployment provides deployment for testing
func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {
	replicaset := int32(1)
	return &v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      deploymentName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: deploymentName,
				constants.SecretUpdateOnChangeAnnotation:    deploymentName},
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicaset,
			Strategy: v1beta1.DeploymentStrategy{
				Type: v1beta1.RollingUpdateDeploymentStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  deploymentName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
		},
	}
}

// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
	return &v1beta1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      daemonsetName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: daemonsetName,
				constants.SecretUpdateOnChangeAnnotation:    daemonsetName},
		},
		Spec: v1beta1.DaemonSetSpec{
			UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
				Type: v1beta1.RollingUpdateDaemonSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  daemonsetName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
		},
	}
}

// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
	return &v1_beta1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      statefulsetName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
			Annotations: map[string]string{
				constants.ConfigmapUpdateOnChangeAnnotation: statefulsetName,
				constants.SecretUpdateOnChangeAnnotation:    statefulsetName},
		},
		Spec: v1_beta1.StatefulSetSpec{
			UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
				Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"secondLabel": "temp"},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Image: "tutum/hello-world",
							Name:  statefulsetName,
							Env: []v1.EnvVar{
								{
									Name:  "BUCKET_NAME",
									Value: "test",
								},
							},
						},
					},
				},
			},
		},
	}
}
// GetConfigmap provides configmap for testing
func GetConfigmap(namespace string, configmapName string, testData string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configmapName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
		},
		Data: map[string]string{"test.url": testData},
	}
}

// GetConfigmapWithUpdatedLabel provides configmap for testing
func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLabel string, testData string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configmapName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": testLabel},
		},
		Data: map[string]string{"test.url": testData},
	}
}

// GetSecret provides secret for testing
func GetSecret(namespace string, secretName string, data string) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": "temp"},
		},
		Data: map[string][]byte{"test.url": []byte(data)},
	}
}

// GetSecretWithUpdatedLabel provides secret for testing
func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretName,
			Namespace: namespace,
			Labels:    map[string]string{"firstLabel": label},
		},
		Data: map[string][]byte{"test.url": []byte(data)},
	}
}

// GetResourceSHA returns the SHA value of given environment variable
func GetResourceSHA(containers []v1.Container, envar string) string {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
			if envs[j].Name == envar {
				return envs[j].Value
			}
		}
	}
	return ""
}

// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
	values := []string{}
	logrus.Infof("Generating SHA for %s data", resourceType)
	if resourceType == SecretResourceType {
		secret := GetSecret(namespace, resourceName, data)
		for k, v := range secret.Data {
			values = append(values, k+"="+string(v[:]))
		}
	} else if resourceType == ConfigmapResourceType {
		configmap := GetConfigmap(namespace, resourceName, data)
		for k, v := range configmap.Data {
			values = append(values, k+"="+v)
		}
	}
	sort.Strings(values)
	return crypto.GenerateSHA(strings.Join(values, ";"))
}
// CreateConfigMap creates a configmap in given namespace and returns the ConfigMapInterface
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
	logrus.Infof("Creating configmap")
	configmapClient := client.CoreV1().ConfigMaps(namespace)
	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
	time.Sleep(10 * time.Second)
	return configmapClient, err
}

// CreateSecret creates a secret in given namespace and returns the SecretInterface
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
	logrus.Infof("Creating secret")
	secretClient := client.CoreV1().Secrets(namespace)
	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
	time.Sleep(10 * time.Second)
	return secretClient, err
}

// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
	deployment, err := deploymentClient.Create(GetDeployment(namespace, deploymentName))
	time.Sleep(10 * time.Second)
	return deployment, err
}

// CreateDaemonSet creates a daemonset in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string) (*v1beta1.DaemonSet, error) {
	logrus.Infof("Creating DaemonSet")
	daemonsetClient := client.ExtensionsV1beta1().DaemonSets(namespace)
	daemonset, err := daemonsetClient.Create(GetDaemonSet(namespace, daemonsetName))
	time.Sleep(10 * time.Second)
	return daemonset, err
}

// CreateStatefulSet creates a statefulset in given namespace and returns the StatefulSet
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string) (*v1_beta1.StatefulSet, error) {
	logrus.Infof("Creating StatefulSet")
	statefulsetClient := client.AppsV1beta1().StatefulSets(namespace)
	statefulset, err := statefulsetClient.Create(GetStatefulSet(namespace, statefulsetName))
	time.Sleep(10 * time.Second)
	return statefulset, err
}

// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
	logrus.Infof("Deleting Deployment")
	deploymentError := client.ExtensionsV1beta1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	return deploymentError
}

// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
	daemonsetError := client.ExtensionsV1beta1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	return daemonsetError
}

// DeleteStatefulSet deletes a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
	statefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	return statefulsetError
}

// UpdateConfigMap updates a configmap in given namespace and returns the error if any
func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error {
	logrus.Infof("Updating configmap %q.\n", configmapName)
	var configmap *v1.ConfigMap
	if label != "" {
		configmap = GetConfigmapWithUpdatedLabel(namespace, configmapName, label, data)
	} else {
		configmap = GetConfigmap(namespace, configmapName, data)
	}
	_, updateErr := configmapClient.Update(configmap)
	time.Sleep(10 * time.Second)
	return updateErr
}

// UpdateSecret updates a secret in given namespace and returns the error if any
func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error {
	logrus.Infof("Updating secret %q.\n", secretName)
	var secret *v1.Secret
	if label != "" {
		secret = GetSecretWithUpdatedLabel(namespace, secretName, label, data)
	} else {
		secret = GetSecret(namespace, secretName, data)
	}
	_, updateErr := secretClient.Update(secret)
	time.Sleep(10 * time.Second)
	return updateErr
}

// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	return err
}

// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
	time.Sleep(10 * time.Second)
	return err
}

// RandSeq generates a random sequence
func RandSeq(n int) string {
	rand.Seed(time.Now().UnixNano())
	b := make([]rune, n)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}
// VerifyResourceUpdate checks whether the watched workloads carry the expected SHA value in their Reloader env var
func VerifyResourceUpdate(client kubernetes.Interface, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(client, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
		// match resources with the correct annotation
		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
		if annotationValue != "" {
			values := strings.Split(annotationValue, ",")
			matches := false
			for _, value := range values {
				if value == config.ResourceName {
					matches = true
					break
				}
			}
			if matches {
				envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + envVarPostfix
				updated := GetResourceSHA(containers, envName)

				if updated == config.SHAValue {
					return true
				}
			}
		}
	}
	return false
}
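For reference, `ConvertResourceToSHA` hashes the sorted, `;`-joined `key=value` pairs of the resource data, and `VerifyResourceUpdate` compares that digest against the env var read back with `GetResourceSHA`. A small sketch of the equivalence, runnable only from inside this repository since the packages are internal; the namespace and resource names are illustrative:

```go
package main

import (
	"fmt"
	"sort"
	"strings"

	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/testutil"
)

func main() {
	// The helper hashes "test.url=www.twitter.com" for the single-key test configmap.
	fromHelper := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, "test-ns", "foo", "www.twitter.com")

	// Recomputing the same digest by hand.
	values := []string{"test.url=www.twitter.com"}
	sort.Strings(values)
	byHand := crypto.GenerateSHA(strings.Join(values, ";"))

	fmt.Println(fromHelper == byHand) // true
}
```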
9 internal/pkg/util/config.go Normal file
@@ -0,0 +1,9 @@
package util

// Config contains rolling upgrade configuration parameters
type Config struct {
	Namespace    string
	ResourceName string
	Annotation   string
	SHAValue     string
}
38 internal/pkg/util/interface.go Normal file
@@ -0,0 +1,38 @@
package util

import (
	"reflect"

	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// InterfaceSlice converts an interface to an interface array
func InterfaceSlice(slice interface{}) []interface{} {
	s := reflect.ValueOf(slice)
	if s.Kind() != reflect.Slice {
		logrus.Errorf("InterfaceSlice() given a non-slice type")
	}

	ret := make([]interface{}, s.Len())

	for i := 0; i < s.Len(); i++ {
		ret[i] = s.Index(i).Interface()
	}

	return ret
}

// ObjectMeta wraps the metav1.ObjectMeta extracted from a Kubernetes object
type ObjectMeta struct {
	metav1.ObjectMeta
}

// ToObjectMeta pulls the ObjectMeta field out of any Kubernetes object via reflection
func ToObjectMeta(kubernetesObject interface{}) ObjectMeta {
	objectValue := reflect.ValueOf(kubernetesObject)
	fieldName := reflect.TypeOf((*metav1.ObjectMeta)(nil)).Elem().Name()
	field := objectValue.FieldByName(fieldName).Interface().(metav1.ObjectMeta)

	return ObjectMeta{
		ObjectMeta: field,
	}
}
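`ToObjectMeta` relies on reflection over a struct value (passing a pointer would not expose the field), which is how `VerifyResourceUpdate` reads annotations off arbitrary workload objects. A hedged usage sketch, runnable only from inside this repository and with an illustrative annotation key:

```go
package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util"
	"k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A deployment value with only metadata populated, for illustration.
	deployment := v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "foo",
			Annotations: map[string]string{"example/reload": "foo"},
		},
	}

	// Pass the struct value, not a pointer: ToObjectMeta calls FieldByName on the value.
	meta := util.ToObjectMeta(deployment)
	fmt.Println(meta.Name, meta.Annotations["example/reload"]) // foo foo
}
```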
27 internal/pkg/util/util.go Normal file
@@ -0,0 +1,27 @@
package util

import (
	"bytes"
	"strings"
)

// ConvertToEnvVarName converts the given text into a usable env var,
// replacing any special chars with '_' and transforming text to upper case
func ConvertToEnvVarName(text string) string {
	var buffer bytes.Buffer
	upper := strings.ToUpper(text)
	lastCharValid := false
	for i := 0; i < len(upper); i++ {
		ch := upper[i]
		if (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') {
			buffer.WriteString(string(ch))
			lastCharValid = true
		} else {
			if lastCharValid {
				buffer.WriteString("_")
			}
			lastCharValid = false
		}
	}
	return buffer.String()
}
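As a quick illustration of the transformation (outputs follow directly from the replacement rules above; the full env var name is then composed with the prefix and postfix constants used elsewhere in this change):

```go
package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util"
)

func main() {
	// Letters are upper-cased; runs of other characters collapse to a single '_'.
	fmt.Println(util.ConvertToEnvVarName("www.stakater.com")) // WWW_STAKATER_COM
	fmt.Println(util.ConvertToEnvVarName("foo-configmap"))    // FOO_CONFIGMAP
}
```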
13 internal/pkg/util/util_test.go Normal file
@@ -0,0 +1,13 @@
package util

import (
	"testing"
)

func TestConvertToEnvVarName(t *testing.T) {
	data := "www.stakater.com"
	envVar := ConvertToEnvVarName(data)
	if envVar != "WWW_STAKATER_COM" {
		t.Errorf("Failed to convert data into environment variable")
	}
}
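If broader coverage is ever wanted, the single assertion could be extended into a table-driven test; the extra cases below are illustrative additions, with expected values derived from the replacement rules of `ConvertToEnvVarName`:

```go
func TestConvertToEnvVarNameTable(t *testing.T) {
	cases := map[string]string{
		"www.stakater.com": "WWW_STAKATER_COM", // dots become underscores
		"foo-configmap":    "FOO_CONFIGMAP",    // dashes become underscores
		"123.config":       "123_CONFIG",       // digits are preserved
	}
	for input, want := range cases {
		if got := ConvertToEnvVarName(input); got != want {
			t.Errorf("ConvertToEnvVarName(%q) = %q, want %q", input, got, want)
		}
	}
}
```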