Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits (65 commits)
7f331907d3, 29aa52a1c7, ada8dbb5f3, cfe1754c44, 2cfce5144b, 2fe863a054,
3e01091d01, 7f85a8e53b, c679157e24, 9e7b70964e, 8ebbb476b2, 9263b812eb,
f70dd52b2d, e0a8f1ad04, 45dac417cb, 1514c5bcd2, e7cfafd6d6, 15d7263c95,
de21a400ab, e702610dc6, 481eeeffc4, 801e1dabed, 5c44c1e8f5, 0ef6dcb510,
0ef5e75673, e5f85ae37b, 5d0e9ca70b, 297baa08d5, dd1433a7a9, 9875c416df,
b414e3b350, e417e8bc12, aafe3365eb, 694baf715c, 48b188d7b4, d1cb53b65a,
cefd633176, 08e6f81a15, edbad45637, 363fbd3b77, 82ee3ef3d1, 21502e2bb4,
9d3b70d4d2, 5662919f72, 445d0f870e, 81e74fe830, 50791ad51a, 6a65657e27,
1c7190884a, 488eaa9bef, 676c3703aa, deec4df125, eedc8e81d0, 28456ffafe,
a7c3ae37aa, d043bcf7be, 72a1c59cac, 6299b1d8e9, 11ae057b0a, d34c99baf4,
b7e83b74d8, 919f75bb62, 16079bd1d4, 401d4227d1, 7f9f32ca58
.github/dependabot.yml (vendored, new file; 6 lines)

@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
.github/workflows/pull_request.yaml (vendored; 4 changed lines)

@@ -7,7 +7,7 @@ on:
 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.18.2
+  GOLANG_VERSION: 1.19.5
   KUBERNETES_VERSION: "1.18.0"
   KIND_VERSION: "0.10.0"

@@ -24,7 +24,7 @@ jobs:
       # Setting up helm binary
       - name: Set up Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v3.4

       - name: Set up Go
         id: go
.github/workflows/push.yaml (vendored; 6 changed lines)

@@ -7,7 +7,7 @@ on:
 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.18.2
+  GOLANG_VERSION: 1.19.5
   KUBERNETES_VERSION: "1.18.0"
   KIND_VERSION: "0.10.0"
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"

@@ -27,7 +27,7 @@ jobs:
       # Setting up helm binary
       - name: Set up Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v3.4

       - name: Set up Go
         id: go

@@ -132,7 +132,7 @@ jobs:
       - name: Helm Template
         run: |
           helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
-          helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
+          helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader

       # Publish helm chart
       - name: Publish Helm chart
.github/workflows/release.yaml (vendored; 2 changed lines)

@@ -6,7 +6,7 @@ on:
     - "v*"

 env:
-  GOLANG_VERSION: 1.18.2
+  GOLANG_VERSION: 1.19.5

 jobs:
   build:
Dockerfile (2 changed lines)

@@ -2,7 +2,7 @@ ARG BUILDER_IMAGE
 ARG BASE_IMAGE

 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.18.2} as builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.19.5} as builder

 ARG TARGETOS
 ARG TARGETARCH
README.md (34 changed lines)

@@ -143,6 +143,7 @@ spec:
 - you may override the configmap annotation with the `--configmap-annotation` flag
 - you may override the secret annotation with the `--secret-annotation` flag
 - you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
+- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
 - you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
@@ -182,6 +183,25 @@
 `Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. A workaround for ignoring both resources is to scale the Reloader pods down to `0`.

+Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `--namespace-selector` parameter, for example:
+```
+--namespace-selector=reloader:enabled,test:true
+```
+
+Only namespaces labeled like the following namespace YAML will be watched:
+```yaml
+kind: Namespace
+apiVersion: v1
+metadata:
+  ...
+  labels:
+    reloader: enabled
+    test: true
+  ...
+```
+If you want to select namespaces only by the key of a label, use `*` as the value.
+For example, with `--namespace-selector=select-this:*`, all namespaces carrying the label key "select-this" will be selected regardless of the label's value.
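As an illustration of the selection rule described in the README text above (every selector key must be present on the namespace, and a value of `*` matches any label value), here is a minimal, self-contained Go sketch. It is not part of the diff; the helper name and inputs are hypothetical:

```go
package main

import "fmt"

// namespaceMatches reports whether a namespace's labels satisfy a selector,
// following the rule above: every selector key must be present on the
// namespace, and a selector value of "*" matches any value for that key.
func namespaceMatches(selector, nsLabels map[string]string) bool {
	for key, want := range selector {
		got, ok := nsLabels[key]
		if !ok {
			return false
		}
		if want != "*" && want != got {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"reloader": "enabled", "test": "true"}
	fmt.Println(namespaceMatches(selector, map[string]string{"reloader": "enabled", "test": "true"})) // true

	wildcard := map[string]string{"select-this": "*"}
	fmt.Println(namespaceMatches(wildcard, map[string]string{"select-this": "anything"})) // true
	fmt.Println(namespaceMatches(wildcard, map[string]string{"other": "x"}))              // false
}
```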
 ### Vanilla kustomize

 You can also apply the vanilla manifests by running the following command
@@ -233,6 +253,12 @@
 `Note`: Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Helm template compilation.

+Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `namespaceSelector` parameter:
+
+| Parameter         | Description                                                 | Type   |
+| ----------------- | ----------------------------------------------------------- | ------ |
+| namespaceSelector | comma-separated list of key:value namespace labels to watch | string |

 You can also set the log format of Reloader to JSON by setting `logFormat` to `json` in values.yaml and applying the chart.

 You can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the values.yaml file. Service monitor will be removed in future releases of Reloader in favour of Pod monitor.

@@ -245,6 +271,14 @@
 | isArgoRollouts | Enable Argo Rollouts. Valid values are either `true` or `false` | boolean |
+| reloadOnCreate | Enable reload on create events. Valid values are either `true` or `false` | boolean |

+**ReloadOnCreate**: `reloadOnCreate` controls how Reloader handles configmaps/secrets being added to its cache for the first time. If `reloadOnCreate` is set to `true`:
+* Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload.
+* When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload.
+* If you are running Reloader in HA mode, all workloads will have a rolling update performed when a new leader is elected.
+
+If `reloadOnCreate` is set to `false`:
+* Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs. In the worst case, the window in which there can be no leader is 15s, as this is the lease duration.

 ## Help

 ### Documentation
deployments/kubernetes/chart/reloader/Chart.yaml

@@ -3,8 +3,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.122
-appVersion: v0.0.122
+version: v1.0.2
+appVersion: v1.0.2
 keywords:
   - Reloader
   - kubernetes

@@ -16,16 +16,6 @@ maintainers:
 - name: Stakater
   email: hello@stakater.com
 - name: rasheedamir
-  email: rasheed@aurorasolutions.io
+  email: rasheed@stakater.com
-- name: waseem-h
-  email: waseemhassan@stakater.com
-- name: faizanahmad055
-  email: faizan.ahmad55@outlook.com
-- name: kahootali
-  email: ali.kahoot@aurorasolutions.io
-- name: ahmadiq
-  email: ahmad@aurorasolutions.io
-- name: ahsan-storm
-  email: ahsanmuhammad1@outlook.com
 - name: ahmedwaleedmalik
   email: waleed@stakater.com
deployments/kubernetes/chart/reloader/templates/_helpers.tpl

@@ -28,6 +28,23 @@ heritage: {{ .Release.Service | quote }}
 app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
 {{- end -}}

+{{/*
+Create pod anti affinity labels
+*/}}
+{{- define "reloader-podAntiAffinity" -}}
+podAntiAffinity:
+  preferredDuringSchedulingIgnoredDuringExecution:
+  - weight: 100
+    podAffinityTerm:
+      labelSelector:
+        matchExpressions:
+        - key: app
+          operator: In
+          values:
+          - {{ template "reloader-fullname" . }}
+      topologyKey: "kubernetes.io/hostname"
+{{- end -}}
+
 {{/*
 Create the name of the service account to use
 */}}

@@ -45,4 +62,4 @@ Create the annotations to support helm3
 {{- define "reloader-helm3.annotations" -}}
 meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
 meta.helm.sh/release-name: {{ .Release.Name | quote }}
 {{- end -}}
 {{- end -}}
deployments/kubernetes/chart/reloader/templates/clusterrole.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 rules:
   - apiGroups:
       - ""

@@ -32,6 +32,14 @@ rules:
       - list
      - get
       - watch
+  {{- if .Values.reloader.namespaceSelector }}
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+    verbs:
+      - get
+  {{- end }}
   {{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
   - apiGroups:
       - "apps.openshift.io"

@@ -77,6 +85,16 @@ rules:
       - get
       - update
       - patch
+  {{- if .Values.reloader.enableHA }}
+  - apiGroups:
+      - "coordination.k8s.io"
+    resources:
+      - leases
+    verbs:
+      - create
+      - get
+      - update
+  {{- end}}
   - apiGroups:
       - ""
     resources:
deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole

@@ -25,5 +25,5 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ template "reloader-serviceAccountName" . }}
-    namespace: {{ .Release.Namespace }}
+    namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/templates/deployment.yaml

@@ -15,9 +15,13 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
+{{- if not (.Values.reloader.enableHA) }}
   replicas: 1
+{{- else }}
+  replicas: {{ .Values.reloader.deployment.replicas }}
+{{- end}}
   revisionHistoryLimit: 2
   selector:
     matchLabels:

@@ -41,13 +45,20 @@ spec:
 {{ toYaml .Values.reloader.matchLabels | indent 8 }}
 {{- end }}
     spec:
+      {{- with .Values.reloader.deployment.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- if .Values.reloader.deployment.nodeSelector }}
       nodeSelector:
 {{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
       {{- end }}
-      {{- if .Values.reloader.deployment.affinity }}
+      {{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
       affinity:
+      {{- if .Values.reloader.deployment.affinity }}
 {{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
+      {{- end}}
+{{ include "reloader-podAntiAffinity" . | indent 8 }}
       {{- end }}
       {{- if .Values.reloader.deployment.tolerations }}
       tolerations:

@@ -60,7 +71,7 @@ spec:
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
         imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
        name: {{ template "reloader-fullname" . }}
-      {{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
+      {{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (.Values.reloader.deployment.env.existing) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
         env:
         {{- range $name, $value := .Values.reloader.deployment.env.open }}
         {{- if not (empty $value) }}

@@ -78,6 +89,17 @@ spec:
               key: {{ $name | quote }}
         {{- end }}
         {{- end }}
+        {{- range $secret, $values := .Values.reloader.deployment.env.existing }}
+        {{- range $name, $key := $values }}
+        {{- if not ( empty $name) }}
+        - name: {{ $name | quote }}
+          valueFrom:
+            secretKeyRef:
+              name: {{ $secret | quote }}
+              key: {{ $key | quote }}
+        {{- end }}
+        {{- end }}
+        {{- end }}
         {{- range $name, $value := .Values.reloader.deployment.env.field }}
         {{- if not ( empty $value) }}
         - name: {{ $name | quote }}

@@ -92,14 +114,26 @@ spec:
               fieldRef:
                 fieldPath: metadata.namespace
         {{- end }}
+        {{- if .Values.reloader.enableHA }}
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        {{- end }}
       {{- end }}

         ports:
+        - name: http
+          containerPort: 9091
         - name: metrics
           containerPort: 9090
         livenessProbe:
           httpGet:
-            path: /metrics
+            path: /live
             port: http
           timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
           failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}

@@ -108,7 +142,7 @@ spec:
         readinessProbe:
           httpGet:
             path: /metrics
-            port: http
+            port: metrics
           timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
           failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
           periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}

@@ -123,7 +157,7 @@ spec:
         - mountPath: /tmp/
           name: tmp-volume
       {{- end }}
-      {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default")}}
+      {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
         args:
         {{- if .Values.reloader.logFormat }}
         - "--log-format={{ .Values.reloader.logFormat }}"

@@ -137,7 +171,9 @@ spec:
         {{- if .Values.reloader.ignoreNamespaces }}
         - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
         {{- end }}
+        {{- if .Values.reloader.namespaceSelector }}
+        - "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
+        {{- end }}
         {{- if .Values.reloader.custom_annotations }}
         {{- if .Values.reloader.custom_annotations.configmap }}
         - "--configmap-annotation"

@@ -169,6 +205,9 @@ spec:
         {{- if ne .Values.reloader.reloadStrategy "default" }}
         - "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
         {{- end }}
+        {{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
+        - "--enable-ha=true"
+        {{- end}}
       {{- end }}
       {{- if .Values.reloader.deployment.resources }}
         resources:
PodDisruptionBudget template (new file; 11 lines)

@@ -0,0 +1,11 @@
+{{- if .Values.reloader.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "reloader-fullname" . }}
+spec:
+  minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
+  selector:
+    matchLabels:
+      app: {{ template "reloader-fullname" . }}
+{{- end }}
deployments/kubernetes/chart/reloader/templates/role.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 rules:
   - apiGroups:
       - ""

@@ -77,6 +77,16 @@ rules:
       - get
       - update
       - patch
+  {{- if .Values.reloader.enableHA }}
+  - apiGroups:
+      - "coordination.k8s.io"
+    resources:
+      - leases
+    verbs:
+      - create
+      - get
+      - update
+  {{- end}}
   - apiGroups:
       - ""
     resources:
deployments/kubernetes/chart/reloader/templates/rolebinding.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role

@@ -25,5 +25,5 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ template "reloader-serviceAccountName" . }}
-    namespace: {{ .Release.Namespace }}
+    namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/templates/secret.yaml

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: {{ template "reloader-fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 type: Opaque
 data:
 {{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}

@@ -18,4 +18,4 @@ data:
 {{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
 ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
 {{ end }}
 {{ end }}
 {{ end }}
deployments/kubernetes/chart/reloader/templates/service.yaml

@@ -13,7 +13,7 @@ metadata:
 {{ toYaml .Values.reloader.service.labels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
   selector:
   {{- if .Values.reloader.deployment.labels }}
deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml

@@ -22,5 +22,5 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-serviceAccountName" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/values.yaml

@@ -16,14 +16,18 @@ reloader:
   reloadOnCreate: false
   reloadStrategy: default # Set to default, env-vars or annotations
   ignoreNamespaces: "" # Comma-separated list of namespaces to ignore
+  namespaceSelector: "" # Comma-separated list of 'key:value' namespace labels to select
   logFormat: "" # json
   watchGlobally: true
+  # Set to true to enable leadership election allowing you to run multiple replicas
+  enableHA: false
   # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
   readOnlyRootFileSystem: false
   legacy:
     rbac: false
   matchLabels: {}
   deployment:
+    # If you wish to run multiple replicas, set reloader.enableHA = true
     replicas: 1
     nodeSelector:
       # cloud.google.com/gke-nodepool: default-pool

@@ -62,10 +66,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.122
+      version: v1.0.2
     image:
       name: stakater/reloader
-      tag: v0.0.122
+      tag: v1.0.2
      pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:

@@ -79,6 +83,15 @@ reloader:
       # ALERT_ADDITIONAL_INFO: <"Additional info like cluster name, if needed">
       # field supports key-value pairs as environment variables. It gets the values from other fields of the pod.
       field:
+      # existing secret; you can specify multiple existing secrets. For each,
+      # specify the env var name followed by the key in the existing secret
+      # that will be used to populate the env var.
+      existing:
+        # existing_secret_name:
+        #   ALERT_ON_RELOAD: alert_on_reload_key
+        #   ALERT_SINK: alert_sink_key
+        #   ALERT_WEBHOOK_URL: alert_webhook_key
+        #   ALERT_ADDITIONAL_INFO: alert_additional_info_key

     # Liveness and readiness probe timeout values.
     livenessProbe: {}

@@ -105,6 +118,8 @@ reloader:
     pod:
       annotations: {}
     priorityClassName: ""
+    # imagePullSecrets:
+    #   - name: myregistrykey

     service: {}
       # labels: {}

@@ -153,3 +168,8 @@ reloader:
       # labels:
       # Set timeout for scrape
       # timeout: 10s

+  podDisruptionBudget:
+    enabled: false
+    # Set the minimum available replicas
+    # minAvailable: 1
Generated manifests (deployments/kubernetes/manifests/):

@@ -9,7 +9,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

@@ -9,7 +9,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

@@ -8,13 +8,13 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.122
+    version: v1.0.2
   name: reloader-reloader
   namespace: default
 spec:

@@ -28,25 +28,27 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.122"
+        chart: "reloader-v1.0.2"
         release: "reloader"
         heritage: "Helm"
         app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.122
+        version: v1.0.2
     spec:
       containers:
-        - image: "stakater/reloader:v0.0.122"
+        - image: "stakater/reloader:v1.0.2"
          imagePullPolicy: IfNotPresent
           name: reloader-reloader

           ports:
+          - name: http
+            containerPort: 9091
           - name: metrics
             containerPort: 9090
           livenessProbe:
             httpGet:
-              path: /metrics
+              path: /live
               port: http
             timeoutSeconds: 5
             failureThreshold: 5

@@ -55,7 +57,7 @@ spec:
           readinessProbe:
             httpGet:
               path: /metrics
-              port: http
+              port: metrics
             timeoutSeconds: 5
             failureThreshold: 5
             periodSeconds: 10
@@ -8,7 +8,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

Generated manifests, continued:

@@ -8,7 +8,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

@@ -25,7 +25,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

@@ -80,7 +80,7 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"

@@ -104,13 +104,13 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.122"
+    chart: "reloader-v1.0.2"
     release: "reloader"
     heritage: "Helm"
     app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.122
+    version: v1.0.2
   name: reloader-reloader
   namespace: default
 spec:

@@ -124,25 +124,27 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.122"
+        chart: "reloader-v1.0.2"
         release: "reloader"
         heritage: "Helm"
         app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.122
+        version: v1.0.2
     spec:
       containers:
-        - image: "stakater/reloader:v0.0.122"
+        - image: "stakater/reloader:v1.0.2"
           imagePullPolicy: IfNotPresent
           name: reloader-reloader

           ports:
+          - name: http
+            containerPort: 9091
           - name: metrics
             containerPort: 9090
           livenessProbe:
             httpGet:
-              path: /metrics
+              path: /live
               port: http
             timeoutSeconds: 5
             failureThreshold: 5

@@ -151,7 +153,7 @@ spec:
           readinessProbe:
             httpGet:
               path: /metrics
-              port: http
+              port: metrics
             timeoutSeconds: 5
             failureThreshold: 5
             periodSeconds: 10
go.mod (57 changed lines)

@@ -1,26 +1,26 @@
 module github.com/stakater/Reloader

-go 1.18
+go 1.19

 require (
-	github.com/argoproj/argo-rollouts v1.2.1
+	github.com/argoproj/argo-rollouts v1.4.0
 	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
 	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
 	github.com/parnurzeal/gorequest v0.2.16
-	github.com/prometheus/client_golang v1.12.2
-	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.5.0
-	k8s.io/api v0.24.2
-	k8s.io/apimachinery v0.24.2
-	k8s.io/client-go v0.24.2
-	k8s.io/kubectl v0.23.1
+	github.com/prometheus/client_golang v1.14.0
+	github.com/sirupsen/logrus v1.9.0
+	github.com/spf13/cobra v1.6.1
+	k8s.io/api v0.26.1
+	k8s.io/apimachinery v0.26.1
+	k8s.io/client-go v0.26.1
+	k8s.io/kubectl v0.26.1
 )

 require (
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect

@@ -30,39 +30,40 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.36.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.39.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/smartystreets/goconvey v1.7.2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect
-	golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
-	golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e // indirect
-	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
-	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/net v0.4.0 // indirect
+	golang.org/x/oauth2 v0.3.0 // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/term v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
 	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/klog/v2 v2.60.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
-	k8s.io/utils v0.0.0-20220706174534-f6158b442e7c // indirect
+	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
+	k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
 	moul.io/http2curl v1.0.0 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
internal/pkg/cmd/reloader.go

@@ -1,12 +1,15 @@
 package cmd

 import (
+	"context"
 	"errors"
 	"fmt"
-	"github.com/stakater/Reloader/internal/pkg/constants"
 	"os"
 	"strings"

+	"github.com/stakater/Reloader/internal/pkg/constants"
+	"github.com/stakater/Reloader/internal/pkg/leadership"
+
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/stakater/Reloader/internal/pkg/controller"

@@ -35,24 +38,38 @@ func NewReloaderCommand() *cobra.Command {
 	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
 	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
 	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
+	cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value namespace labels to include")
 	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
 	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
 	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
+	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")

 	return cmd
 }

 func validateFlags(*cobra.Command, []string) error {
 	// Ensure the reload strategy is one of the valid options
+	var validReloadStrategy bool
 	valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
 	for _, s := range valid {
 		if s == options.ReloadStrategy {
-			return nil
+			validReloadStrategy = true
 		}
 	}

-	err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
-	return errors.New(err)
+	if !validReloadStrategy {
+		err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
+		return errors.New(err)
+	}
+
+	// Validate that the HA options are correct
+	if options.EnableHA {
+		if err := validateHAEnvs(); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }

@@ -68,6 +85,25 @@ func configureLogging(logFormat string) error {
 	return nil
 }

+func validateHAEnvs() error {
+	podName, podNamespace := getHAEnvs()
+
+	if podName == "" {
+		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
+	}
+	if podNamespace == "" {
+		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
+	}
+	return nil
+}
+
+func getHAEnvs() (string, string) {
+	podName := os.Getenv(constants.PodNameEnv)
+	podNamespace := os.Getenv(constants.PodNamespaceEnv)
+
+	return podName, podNamespace
+}
+
 func startReloader(cmd *cobra.Command, args []string) {
 	err := configureLogging(options.LogFormat)
 	if err != nil {

@@ -97,18 +133,34 @@ func startReloader(cmd *cobra.Command, args []string) {
 		logrus.Fatal(err)
 	}

+	namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
+	if err != nil {
+		logrus.Fatal(err)
+	}
+
+	if len(namespaceLabelSelector) > 0 {
+		logrus.Warnf("namespace-selector is set, will detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
+	}
+
 	collectors := metrics.SetupPrometheusEndpoint()

+	var controllers []*controller.Controller
 	for k := range kube.ResourceMap {
 		if ignoredResourcesList.Contains(k) {
 			continue
 		}

-		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
+		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, collectors)
 		if err != nil {
 			logrus.Fatalf("%s", err)
 		}

+		controllers = append(controllers, c)
+
+		// If HA is enabled we only run the controllers after winning the leader election
+		if options.EnableHA {
+			continue
+		}
 		// Now let's start the controller
 		stop := make(chan struct{})
 		defer close(stop)

@@ -116,14 +168,37 @@ func startReloader(cmd *cobra.Command, args []string) {
 		go c.Run(1, stop)
 	}

-	// Wait forever
-	select {}
+	// Run leadership election
+	if options.EnableHA {
+		podName, podNamespace := getHAEnvs()
+		lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
+	}
+
+	logrus.Fatal(leadership.Healthz())
 }

 func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
 	return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
 }

+func getNamespaceLabelSelector(cmd *cobra.Command) (util.Map, error) {
+	slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
+	if err != nil {
+		logrus.Fatal(err)
+	}
+
+	var namespaceSelectorMap util.Map = make(util.Map)
+	for _, kv := range slice {
+		split := strings.Split(kv, ":")
+		namespaceSelectorMap[split[0]] = split[1]
+	}
+
+	return namespaceSelectorMap, nil
+}
+
 func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
 	slice, err := cmd.Flags().GetStringSlice(flag)
 	if err != nil {
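To make the flag plumbing above concrete, here is a minimal, runnable sketch of how a `--namespace-selector` value parses with the same `strings.Split` logic used in `getNamespaceLabelSelector`. It is an illustration only, assuming well-formed `key:value` entries as the code above does; an entry without a colon would panic on the `split[1]` index:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// cobra's StringSlice splits "--namespace-selector=reloader:enabled,select-this:*"
	// on commas before the parsing code above ever sees it.
	entries := []string{"reloader:enabled", "select-this:*"}

	selector := make(map[string]string)
	for _, kv := range entries {
		split := strings.Split(kv, ":")
		selector[split[0]] = split[1] // assumes a well-formed "key:value" pair
	}
	fmt.Println(selector) // map[reloader:enabled select-this:*]
}
```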
internal/pkg/constants/constants.go

@@ -20,3 +20,10 @@ const (
 	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
 	AnnotationsReloadStrategy = "annotations"
 )
+
+// Leadership election related consts
+const (
+	LockName        string = "stakater-reloader-lock"
+	PodNameEnv      string = "POD_NAME"
+	PodNamespaceEnv string = "POD_NAMESPACE"
+)
internal/pkg/controller/controller.go

@@ -1,6 +1,7 @@
 package controller

 import (
+	"context"
 	"fmt"
 	"time"

@@ -11,6 +12,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"

@@ -32,6 +34,7 @@ type Controller struct {
 	ignoredNamespaces util.List
 	collectors        metrics.Collectors
 	recorder          record.EventRecorder
+	namespaceSelector map[string]string
 }

 // controllerInitialized flag determines whether the controller is being initialized

@@ -39,12 +42,13 @@ var controllerInitialized bool = false

 // NewController for initializing a Controller
 func NewController(
-	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
+	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector map[string]string, collectors metrics.Collectors) (*Controller, error) {

 	c := Controller{
 		client:            client,
 		namespace:         namespace,
 		ignoredNamespaces: ignoredNamespaces,
+		namespaceSelector: namespaceLabelSelector,
 	}
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{

@@ -65,13 +69,15 @@ func NewController(
 	c.queue = queue
 	c.collectors = collectors
 	c.recorder = recorder

 	logrus.Infof("created controller for: %s", resource)
 	return &c, nil
 }

 // Add function to add a new object to the queue in case of creating a resource
 func (c *Controller) Add(obj interface{}) {
 	if options.ReloadOnCreate == "true" {
-		if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
+		if !c.resourceInIgnoredNamespace(obj) && c.resourceInNamespaceSelector(obj) && controllerInitialized {
 			c.queue.Add(handler.ResourceCreatedHandler{
 				Resource:   obj,
 				Collectors: c.collectors,

@@ -91,9 +97,45 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
 	return false
 }

+func (c *Controller) resourceInNamespaceSelector(raw interface{}) bool {
+	if len(c.namespaceSelector) == 0 {
+		return true
+	}
+
+	switch object := raw.(type) {
+	case *v1.ConfigMap:
+		return c.matchLabels(object.ObjectMeta.Namespace)
+	case *v1.Secret:
+		return c.matchLabels(object.ObjectMeta.Namespace)
+	}
+	return true
+}
+
+func (c *Controller) matchLabels(resourceNamespace string) bool {
+	namespace, err := c.client.CoreV1().Namespaces().Get(context.Background(), resourceNamespace, metav1.GetOptions{})
+	if err != nil {
+		logrus.Warn(err)
+		return false
+	}
+
+	for selectorKey, selectorVal := range c.namespaceSelector {
+
+		namespaceLabelVal, namespaceLabelKeyExists := namespace.ObjectMeta.Labels[selectorKey]
+
+		if namespaceLabelKeyExists && selectorVal == "*" {
+			continue
+		}
+
+		if !namespaceLabelKeyExists || selectorVal != namespaceLabelVal {
+			return false
+		}
+	}
+	return true
+}
+
 // Update function to add an old object and a new object to the queue in case of updating a resource
 func (c *Controller) Update(old interface{}, new interface{}) {
-	if !c.resourceInIgnoredNamespace(new) {
+	if !c.resourceInIgnoredNamespace(new) && c.resourceInNamespaceSelector(new) {
 		c.queue.Add(handler.ResourceUpdatedHandler{
 			Resource:    new,
 			OldResource: old,

@@ -180,5 +222,6 @@ func (c *Controller) handleErr(err error, key interface{}) {
 	c.queue.Forget(key)
 	// Report to an external entity that, even after several retries, we could not successfully process this key
 	runtime.HandleError(err)
-	logrus.Infof("Dropping the key %q out of the queue: %v", key, err)
+	logrus.Errorf("Dropping key out of the queue: %v", err)
+	logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
 }
internal/pkg/controller/controller_test.go

@@ -1,11 +1,13 @@
 package controller

 import (
-	"github.com/stakater/Reloader/internal/pkg/constants"
+	"context"
 	"os"
 	"testing"
 	"time"

+	"github.com/stakater/Reloader/internal/pkg/constants"
+
 	"github.com/stakater/Reloader/internal/pkg/metrics"

 	"github.com/sirupsen/logrus"

@@ -14,7 +16,10 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/testutil"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 )

@@ -40,7 +45,7 @@ func TestMain(m *testing.M) {

 	logrus.Infof("Creating controller")
 	for k := range kube.ResourceMap {
-		c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
+		c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, map[string]string{}, collectors)
 		if err != nil {
 			logrus.Fatalf("%s", err)
 		}

@@ -2279,3 +2284,143 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
 		})
 	}
 }
+
+func TestController_resourceInNamespaceSelector(t *testing.T) {
+	type fields struct {
+		indexer           cache.Indexer
+		queue             workqueue.RateLimitingInterface
+		informer          cache.Controller
+		namespace         v1.Namespace
+		namespaceSelector util.Map
+	}
+	type args struct {
+		raw interface{}
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   bool
+	}{
+		{
+			name: "TestConfigMapResourceInNamespaceSelector",
+			fields: fields{
+				namespaceSelector: util.Map{
+					"select":  "this",
+					"select2": "this2",
+				},
+				namespace: v1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "selected-namespace",
+						Labels: map[string]string{
+							"select":  "this",
+							"select2": "this2",
+						},
+					},
+				},
+			},
+			args: args{
+				raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"),
+			},
+			want: true,
+		}, {
+			name: "TestConfigMapResourceNotInNamespaceSelector",
+			fields: fields{
+				namespaceSelector: util.Map{
+					"select":  "this",
+					"select2": "this2",
+				},
+				namespace: v1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "not-selected-namespace",
+						Labels: map[string]string{},
+					},
+				},
+			},
+			args: args{
+				raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"),
+			},
+			want: false,
+		},
+		{
+			name: "TestSecretResourceInNamespaceSelector",
+			fields: fields{
+				namespaceSelector: util.Map{
+					"select":  "this",
+					"select2": "this2",
+				},
+				namespace: v1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "selected-namespace",
+						Labels: map[string]string{
+							"select":  "this",
+							"select2": "this2",
+						},
+					},
+				},
+			},
+			args: args{
+				raw: testutil.GetSecret("selected-namespace", "testsecret", "test"),
+			},
+			want: true,
+		}, {
+			name: "TestSecretResourceNotInNamespaceSelector",
+			fields: fields{
+				namespaceSelector: util.Map{
+					"select":  "this",
+					"select2": "this2",
+				},
+				namespace: v1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "not-selected-namespace",
+						Labels: map[string]string{},
+					},
+				},
+			},
+			args: args{
+				raw: testutil.GetSecret("not-selected-namespace", "secret", "test"),
+			},
+			want: false,
+		}, {
+			name: "TestSecretResourceInNamespaceSelectorWildcardValue",
+			fields: fields{
+				namespaceSelector: util.Map{
+					"select": "*",
+				},
+				namespace: v1.Namespace{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "selected-namespace",
+						Labels: map[string]string{
+							"select": "this",
+						},
+					},
+				},
+			},
+			args: args{
+				raw: testutil.GetSecret("selected-namespace", "secret", "test"),
+			},
+			want: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			fakeClient := fake.NewSimpleClientset()
+			namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
+			logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name)
+
+			c := &Controller{
+				client:            fakeClient,
+				indexer:           tt.fields.indexer,
+				queue:             tt.fields.queue,
+				informer:          tt.fields.informer,
+				namespace:         tt.fields.namespace.ObjectMeta.Name,
+				namespaceSelector: tt.fields.namespaceSelector,
+			}
+
+			if got := c.resourceInNamespaceSelector(tt.args.raw); got != tt.want {
+				t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
internal/pkg/leadership/leadership.go (new file; 110 lines)

@@ -0,0 +1,110 @@
+package leadership
+
+import (
+	"context"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	"github.com/stakater/Reloader/internal/pkg/controller"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+
+	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
+)
+
+const healthPort string = ":9091"
+
+var (
+	// Used for liveness probe
+	m       sync.Mutex
+	healthy bool = true
+)
+
+func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
+	return &resourcelock.LeaseLock{
+		LeaseMeta: v1.ObjectMeta{
+			Name:      lockName,
+			Namespace: namespace,
+		},
+		Client: client,
+		LockConfig: resourcelock.ResourceLockConfig{
+			Identity: podname,
+		},
+	}
+}
+
+// RunLeaderElection runs the leadership election. If an instance of the controller is the leader and stops leading, it will shut down.
+func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
+	// Construct channels for the controllers to use
+	var stopChannels []chan struct{}
+	for i := 0; i < len(controllers); i++ {
+		stop := make(chan struct{})
+		stopChannels = append(stopChannels, stop)
+	}
+
+	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
+		Lock:            lock,
+		ReleaseOnCancel: true,
+		LeaseDuration:   15 * time.Second,
+		RenewDeadline:   10 * time.Second,
+		RetryPeriod:     2 * time.Second,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(c context.Context) {
+				logrus.Info("became leader, starting controllers")
+				runControllers(controllers, stopChannels)
+			},
+			OnStoppedLeading: func() {
+				logrus.Info("no longer leader, shutting down")
+				stopControllers(stopChannels)
+				cancel()
+				m.Lock()
+				defer m.Unlock()
+				healthy = false
+			},
+			OnNewLeader: func(current_id string) {
+				if current_id == id {
+					logrus.Info("still the leader!")
+					return
+				}
+				logrus.Infof("new leader is %s", current_id)
+			},
+		},
+	})
+}
+
+func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
+	for i, c := range controllers {
+		c := c
+		go c.Run(1, stopChannels[i])
+	}
+}
+
+func stopControllers(stopChannels []chan struct{}) {
+	for _, c := range stopChannels {
+		close(c)
+	}
+}
+
+// Healthz serves the liveness probe endpoint. If leadership election is
+// enabled and a replica stops leading, the liveness probe will fail and the
+// kubelet will restart the container.
+func Healthz() error {
+	http.HandleFunc("/live", healthz)
+	return http.ListenAndServe(healthPort, nil)
+}
+
+func healthz(w http.ResponseWriter, req *http.Request) {
+	m.Lock()
+	defer m.Unlock()
+	if healthy {
+		if i, err := w.Write([]byte("alive")); err != nil {
+			logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
+		}
+		return
+	}
+
+	w.WriteHeader(http.StatusInternalServerError)
+}
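For a quick manual check of the `/live` endpoint served by `Healthz` above, a small client sketch follows. It assumes a Reloader instance reachable on localhost with the health server listening on the `:9091` port defined by `healthPort`; this is an illustration, not part of the diff:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Probe the liveness endpoint; expect 200 while the replica is healthy
	// (or leading, in HA mode) and 500 after leadership is lost.
	resp, err := http.Get("http://localhost:9091/live")
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("liveness status:", resp.StatusCode)
}
```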
internal/pkg/leadership/leadership_test.go (new file; 213 lines)

@@ -0,0 +1,213 @@
+package leadership
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	"github.com/stakater/Reloader/internal/pkg/constants"
+	"github.com/stakater/Reloader/internal/pkg/controller"
+	"github.com/stakater/Reloader/internal/pkg/handler"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
+	"github.com/stakater/Reloader/internal/pkg/options"
+	"github.com/stakater/Reloader/internal/pkg/testutil"
+	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/kube"
+)
+
+func TestMain(m *testing.M) {
+
+	testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
+
+	logrus.Infof("Running Testcases")
+	retCode := m.Run()
+
+	testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
+
+	os.Exit(retCode)
+}
+
+func TestHealthz(t *testing.T) {
+	request, err := http.NewRequest(http.MethodGet, "/live", nil)
+	if err != nil {
+		t.Fatalf("failed to create request")
+	}
+
+	response := httptest.NewRecorder()
+
+	healthz(response, request)
+	got := response.Code
+	want := 200
+
+	if got != want {
+		t.Fatalf("got: %q, want: %q", got, want)
+	}
+
+	// Have the liveness probe serve a 500
+	healthy = false
+
+	request, err = http.NewRequest(http.MethodGet, "/live", nil)
+	if err != nil {
+		t.Fatalf("failed to create request")
+	}
+
+	response = httptest.NewRecorder()
+
+	healthz(response, request)
+	got = response.Code
+	want = 500
+
+	if got != want {
+		t.Fatalf("got: %q, want: %q", got, want)
+	}
+}
+
+// TestRunLeaderElection validates that the liveness endpoint serves 500 when
+// leadership election fails
+func TestRunLeaderElection(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.TODO())
+
+	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)
+
+	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})
+
+	// Liveness probe should be serving OK
+	request, err := http.NewRequest(http.MethodGet, "/live", nil)
+	if err != nil {
+		t.Fatalf("failed to create request")
+	}
+
+	response := httptest.NewRecorder()
+
+	healthz(response, request)
+	got := response.Code
+	want := 500
+
+	if got != want {
+		t.Fatalf("got: %q, want: %q", got, want)
+	}
+
+	// Cancel the leader election context, so leadership is released and
+	// live endpoint serves 500
+	cancel()
+
+	request, err = http.NewRequest(http.MethodGet, "/live", nil)
+	if err != nil {
+		t.Fatalf("failed to create request")
+	}
+
+	response = httptest.NewRecorder()
+
+	healthz(response, request)
+	got = response.Code
+	want = 500
+
+	if got != want {
+		t.Fatalf("got: %q, want: %q", got, want)
+	}
+}
+
+// TestRunLeaderElectionWithControllers tests that leadership election works
+// with real controllers and that on context cancellation the controllers stop
+// running.
+func TestRunLeaderElectionWithControllers(t *testing.T) {
+	t.Logf("Creating controller")
+	var controllers []*controller.Controller
+	for k := range kube.ResourceMap {
+		c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, map[string]string{}, metrics.NewCollectors())
+		if err != nil {
+			logrus.Fatalf("%s", err)
+		}
+
+		controllers = append(controllers, c)
+	}
+	time.Sleep(3 * time.Second)
+
+	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)
+
+	ctx, cancel := context.WithCancel(context.TODO())
+
+	// Start running leadership election, this also starts the controllers
+	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
+	time.Sleep(3 * time.Second)
+
+	// Create a configmap and a deployment that references it
+	configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
+	configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
+	if err != nil {
+		t.Fatalf("Error while creating the configmap %v", err)
+	}
+
+	// Creating deployment
+	_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
+	if err != nil {
+		t.Fatalf("Error in deployment creation: %v", err)
+	}
+
+	// Updating configmap for the first time
+	updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
+	if updateErr != nil {
+		t.Fatalf("Configmap was not updated")
+	}
+	time.Sleep(3 * time.Second)
+
+	// Verifying deployment update
+	logrus.Infof("Verifying pod envvars have been created")
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
+	config := util.Config{
+		Namespace:    testutil.Namespace,
+		ResourceName: configmapName,
+		SHAValue:     shaData,
+		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
+	}
+	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
+	updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Fatalf("Deployment was not updated")
+	}
+	time.Sleep(testutil.SleepDuration)
+
+	// Cancel the leader election context, so leadership is released
+	logrus.Info("shutting down controller from test")
+	cancel()
+	time.Sleep(5 * time.Second)
+
+	// Updating configmap again
+	updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
+	if updateErr != nil {
+		t.Fatalf("Configmap was not updated")
+	}
+
+	// Verifying that the deployment was not updated as leadership has been lost
+	logrus.Infof("Verifying pod envvars have not been updated")
+	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
+	config = util.Config{
+		Namespace:    testutil.Namespace,
+		ResourceName: configmapName,
+		SHAValue:     shaData,
+		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
+	}
+	deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
+	updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if updated {
+		t.Fatalf("Deployment was updated")
+	}
+
+	// Deleting deployment
+	err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
+	if err != nil {
+		logrus.Errorf("Error while deleting the deployment %v", err)
+	}
+
+	// Deleting configmap
+	err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
+	if err != nil {
+		logrus.Errorf("Error while deleting the configmap %v", err)
+	}
+	time.Sleep(testutil.SleepDuration)
+}
internal/pkg/options

@@ -25,4 +25,6 @@ var (
 	ReloadStrategy = constants.EnvVarsReloadStrategy
 	// ReloadOnCreate Adds support to watch create events
 	ReloadOnCreate = "false"
+	// EnableHA adds support for running multiple replicas via leadership election
+	EnableHA = false
 )
internal/pkg/testutil

@@ -16,6 +16,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/callbacks"
 	"github.com/stakater/Reloader/internal/pkg/constants"
 	"github.com/stakater/Reloader/internal/pkg/crypto"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"

@@ -35,6 +36,19 @@ var (
 	SecretResourceType = "secrets"
 )

+var (
+	Clients             = kube.GetClients()
+	Pod                 = "test-reloader-" + RandSeq(5)
+	Namespace           = "test-reloader-" + RandSeq(5)
+	ConfigmapNamePrefix = "testconfigmap-reloader"
+	SecretNamePrefix    = "testsecret-reloader"
+	Data                = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	NewData             = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	UpdatedData         = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
+	Collectors          = metrics.NewCollectors()
+	SleepDuration       = 3 * time.Second
+)
+
 // CreateNamespace creates namespace for testing
 func CreateNamespace(namespace string, client kubernetes.Interface) {
 	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
internal/pkg/util

@@ -54,6 +54,8 @@ func GetSHAfromSecret(data map[string][]byte) string {

 type List []string

+type Map map[string]string
+
 func (l *List) Contains(s string) bool {
 	for _, v := range *l {
 		if v == s {
okteto.yml (31 changed lines)

@@ -1,14 +1,17 @@
-name: reloader-reloader
-image: okteto/golang:1
-command: bash
-securityContext:
-  capabilities:
-    add:
-      - SYS_PTRACE
-volumes:
-  - /go/pkg/
-  - /root/.cache/go-build/
-sync:
-  - .:/app
-forward:
-  - 2345:2345
+dev:
+  reloader-reloader:
+    image: okteto/golang:1
+    command: bash
+    volumes:
+      - /go/pkg/
+      - /root/.cache/go-build/
+    sync:
+      - .:/app
+    forward:
+      - 2345:2345
+    workdir: /app
+    autocreate: true
+    securityContext:
+      capabilities:
+        add:
+          - SYS_PTRACE