Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-15 02:19:50 +00:00)
Compare commits
72 Commits
The author and date columns did not survive the mirrored view; the SHA1 column is:

```
2c8ef70c43 4d2c8a451e f7927c85b1 2e2fd2a11b 0e6ec1d36b 85b33d9104 c838ecbbc7 068a5c1e64
4d559a1864 322142dd66 39f37b706c 4e10dd4f80 ccaa600ff4 a3fcfeb62f d2cbbafeb1 eaf8e16414
5a65cf9f6d a8a68ae1b0 7643a27fb1 71fdb53c2e d6312f6f83 19220f5e6e 05456b0905 10328dee8d
fd174ed691 2e47f1740c 15cb96f945 1e987db54d 12a7fed3ae f18fac66c2 b5c95f9cbf 46b948388f
78be58b090 54a8e0683b 702f0caa93 2e709e85ae debfd57a91 c3b8af34ac 7a65bcb35b af6cd9e37c
344004d0b3 a5bc586f09 81ca7ab601 69c9ccb2ea 0ec3effab8 dba42e91bc 68fd3bebe5 52b975ef0d
0679af76f4 309c10f632 07ddec9fd1 69a80fd1d9 04975de060 459a808371 ef8a335c93 93a52500d1
ac2dac330e e9843c7c7d 1f154d0572 7ccb17392e e8da3f48ec 614865a8d7 4f551ada6e 608a928967
5a14798341 e7516e82e3 dc3494c041 79e3588389 45a833bbb2 1f22ebe132 1846b31936 935a17b1c7
```
.github/workflows/pull_request.yaml (vendored, 9 lines changed)
````diff
@@ -19,7 +19,9 @@ jobs:
     steps:
     - name: Check out code
       uses: actions/checkout@v2
+      with:
+        ref: ${{github.event.pull_request.head.sha}}
 
     # Setting up helm binary
     - name: Set up Helm
       uses: azure/setup-helm@v1
@@ -98,6 +100,7 @@ jobs:
         push: true
         build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
         cache-to: type=inline
+        platforms: linux/amd64,linux/arm,linux/arm64
         tags: |
           ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
         labels: |
@@ -110,7 +113,7 @@ jobs:
       env:
         GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
       with:
-        message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
+        message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
         allow-repeats: false
 
     - name: Notify Failure
@@ -130,4 +133,4 @@ jobs:
         fields: repo,author,action,eventName,ref,workflow
       env:
         GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-        SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
+        SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
````
.github/workflows/push.yaml (vendored, 5 lines changed)
````diff
@@ -99,6 +99,7 @@ jobs:
         push: true
         build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
         cache-to: type=inline
+        platforms: linux/amd64,linux/arm,linux/arm64
         tags: |
           ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
         labels: |
@@ -126,6 +127,10 @@ jobs:
           VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
         run: make bump-chart
 
+      - name: Helm Template
+        run: |
+          helm template stakater deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
+
       # Publish helm chart
       - name: Publish Helm chart
         uses: stefanprodan/helm-gh-pages@master
````
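The new `platforms:` lines in both workflows assume the job can build non-native architectures. Only the build step itself appears in these hunks; the QEMU and Buildx setup steps below are an assumption about how such a job is typically wired up, not something visible in this compare:

```yaml
# Hypothetical minimal job skeleton; action versions are illustrative.
steps:
  - name: Set up QEMU                   # emulation needed to build arm/arm64 on amd64 runners
    uses: docker/setup-qemu-action@v1
  - name: Set up Docker Buildx          # buildx driver that understands multi-platform builds
    uses: docker/setup-buildx-action@v1
  - name: Build and push
    uses: docker/build-push-action@v2
    with:
      push: true
      platforms: linux/amd64,linux/arm,linux/arm64
```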
````diff
@@ -1,5 +1,8 @@
 # Build the manager binary
-FROM golang:1.15.2 as builder
+FROM --platform=${BUILDPLATFORM} golang:1.15.2 as builder
+
+ARG TARGETOS
+ARG TARGETARCH
 
 WORKDIR /workspace
 
@@ -16,7 +19,7 @@ COPY internal/ internal/
 COPY pkg/ pkg/
 
 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go
+RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
````
Makefile (7 lines changed)
````diff
@@ -90,6 +90,7 @@ deploy: binary-image push apply
 
 # Bump Chart
 bump-chart:
-	sed -i "s/^version:.*/version: $(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
-	sed -i "s/^appVersion:.*/appVersion: $(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
-	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
+	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
+	sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
````
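For reference, after `make bump-chart` with `VERSION=0.0.95` the chart metadata carries the `v` prefix everywhere, which matches the Chart.yaml hunk further down in this compare:

```yaml
# Illustrative Chart.yaml fields written by the bump-chart target above
version: v0.0.95
appVersion: v0.0.95
```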
README.md (28 lines changed)
````diff
@@ -13,11 +13,11 @@
 
 ## Problem
 
-We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset` and `Statefulset`
+We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`
 
 ## Solution
 
-Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` and `Statefulsets`.
+Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts`.
 
 ## Compatibility
 
````
````diff
@@ -36,11 +36,11 @@ spec:
   template:
     metadata:
 ```
 
-This will discover deployments/daemonsets/statefulset automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
+This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
 
 You can restrict this discovery to only `ConfigMap` or `Secret` objects that
 are tagged with a special annotation. To take advantage of that, annotate
-your deployment/daemonset/statefulset like this:
+your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:
 
 ```yaml
 kind: Deployment
@@ -63,7 +63,7 @@ data:
   key: value
 ```
 
-provided the secret/configmap is being used in an environment variable or a
+provided the secret/configmap is being used in an environment variable, or a
 volume mount.
 
 Please note that `reloader.stakater.com/search` and
@@ -73,7 +73,7 @@ will always restart upon a change in configmaps or secrets it uses, regardless
 of whether they have the `reloader.stakater.com/match: "true"` annotation or
 not.
 
-We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
+We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
 To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
 
 ### Configmap
````
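Putting the two annotation styles from these README hunks side by side, a minimal sketch (resource and configmap names here are illustrative, not from the diff):

```yaml
# Style 1: automatic discovery of every configmap/secret the workload consumes
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo
  annotations:
    reloader.stakater.com/auto: "true"
---
# Style 2: reload only when the named configmap changes
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bar
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
```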
````diff
@@ -131,6 +131,7 @@ spec:
 ### NOTES
 
 - Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
+- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change is up to you how you configure the rollout strategy.
 - `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
 - `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
 - you may override the auto annotation with the `--auto-annotation` flag
````
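Per the new NOTES entry, Reloader only triggers a change on an Argo `Rollout`; the configured rollout strategy decides how that change is delivered. A sketch of an annotated Rollout, assuming the same annotations as for other workloads (names and strategy values are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: foo-rollout
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  strategy:
    canary:              # how the triggered change rolls out is up to this strategy
      steps:
      - setWeight: 50
      - pause: {}
```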
````diff
@@ -154,7 +155,7 @@ You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided
 kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
 ```
 
-By default Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
+By default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
 
 Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :
````
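The hunk cuts off before the args themselves. Based on the `--resources-to-ignore` flag visible in the cmd changes further down in this compare (valid options `configMaps` or `secrets`), such container args might look like:

```yaml
spec:
  template:
    spec:
      containers:
      - name: reloader
        args:
        - "--resources-to-ignore=configMaps"   # or: --resources-to-ignore=secrets
```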
````diff
@@ -201,7 +202,7 @@ helm repo update
 helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
 ```
 
-**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in `test` namespace.
+**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
 
 ```bash
 helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
````
````diff
@@ -218,13 +219,20 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
 
 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
 
-You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
+You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
+
+**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
+
+| Parameter      | Description                                                                  | Type    |
+| -------------- | ---------------------------------------------------------------------------- | ------- |
+| isOpenshift    | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean |
+| isArgoRollouts | Enable Argo Rollouts. Valid value are either `true` or `false`               | boolean |
 
 ## Help
 
 ### Documentation
 
-You can find more documentation [here](docs/)
+You can find more documentation [here](docs)
 
 ### Have a question?
````
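A sketch of the corresponding values.yaml switches (both default to `false` per the values hunks below in this compare):

```yaml
reloader:
  isOpenshift: true     # enables reloading of OpenShift DeploymentConfigs
  isArgoRollouts: true  # enables reloading of Argo Rollouts
```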
````diff
@@ -3,8 +3,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 0.0.78
-appVersion: 0.0.78
+version: v0.0.95
+appVersion: v0.0.95
 keywords:
 - Reloader
 - kubernetes
````
````diff
@@ -32,7 +32,7 @@ rules:
   - list
   - get
   - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
   - "apps.openshift.io"
   - ""
@@ -43,6 +43,18 @@ rules:
   - get
   - update
   - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+  - "argoproj.io"
+  - ""
+  resources:
+  - rollouts
+  verbs:
+  - list
+  - get
+  - update
+  - patch
+{{- end }}
 - apiGroups:
   - "apps"
````
````diff
@@ -51,6 +51,9 @@ spec:
 {{- if .Values.reloader.deployment.tolerations }}
       tolerations:
 {{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
 {{- end }}
+{{- if .Values.reloader.deployment.priorityClassName }}
+      priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
+{{- end }}
       containers:
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
@@ -107,7 +110,7 @@ spec:
         - mountPath: /tmp/
           name: tmp-volume
 {{- end }}
-{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
+{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
         args:
 {{- if .Values.reloader.logFormat }}
           - "--log-format={{ .Values.reloader.logFormat }}"
@@ -124,17 +127,28 @@ spec:
 
 {{- if .Values.reloader.custom_annotations }}
 {{- if .Values.reloader.custom_annotations.configmap }}
-          - "--configmap-annotation"
-          - "{{ .Values.reloader.custom_annotations.configmap }}"
+          - "--configmap-annotation"
+          - "{{ .Values.reloader.custom_annotations.configmap }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.secret }}
-          - "--secret-annotation"
-          - "{{ .Values.reloader.custom_annotations.secret }}"
+          - "--secret-annotation"
+          - "{{ .Values.reloader.custom_annotations.secret }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.auto }}
-          - "--auto-annotation"
-          - "{{ .Values.reloader.custom_annotations.auto }}"
+          - "--auto-annotation"
+          - "{{ .Values.reloader.custom_annotations.auto }}"
 {{- end }}
+{{- if .Values.reloader.custom_annotations.search }}
+          - "--auto-search-annotation"
+          - "{{ .Values.reloader.custom_annotations.search }}"
+{{- end }}
+{{- if .Values.reloader.custom_annotations.match }}
+          - "--search-match-annotation"
+          - "{{ .Values.reloader.custom_annotations.match }}"
+{{- end }}
 {{- end }}
+{{- if eq .Values.reloader.isArgoRollouts true }}
+          - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
+{{- end }}
 {{- end }}
 {{- if .Values.reloader.deployment.resources }}
````
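The commented `custom_annotations` block in the values hunks below hints at the shape these template branches expect. A sketch of a filled-in block (the annotation keys here are hypothetical placeholders, not defaults from the chart):

```yaml
reloader:
  custom_annotations:
    configmap: "my.company.com/configmap"   # rendered as --configmap-annotation
    secret: "my.company.com/secret"         # rendered as --secret-annotation
    search: "my.company.com/search"         # rendered as --auto-search-annotation
    match: "my.company.com/match"           # rendered as --search-match-annotation
```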
````diff
@@ -0,0 +1,31 @@
+{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.podMonitor.labels }}
+{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}
+{{- if .Values.reloader.podMonitor.namespace }}
+  namespace: {{ .Values.reloader.podMonitor.namespace }}
+{{- end }}
+spec:
+  podMetricsEndpoints:
+  - port: http
+    path: "/metrics"
+{{- if .Values.reloader.podMonitor.interval }}
+    interval: {{ .Values.reloader.podMonitor.interval }}
+{{- end }}
+{{- if .Values.reloader.podMonitor.timeout }}
+    scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
+{{- end }}
+  jobLabel: {{ template "reloader-fullname" . }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+{{ include "reloader-labels.chart" . | nindent 6 }}
+{{- end }}
````
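Rendering this new PodMonitor template is gated on the `podMonitor` values block added below; enabling it might look like this, using the same commented options the values hunk introduces:

```yaml
reloader:
  podMonitor:
    enabled: true
    # namespace: monitoring   # where the PodMonitor object is created
    # interval: 30s           # how frequently Prometheus should scrape
    # timeout: 10s            # scrape timeout
```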
````diff
@@ -32,7 +32,7 @@ rules:
   - list
   - get
   - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
   - "apps.openshift.io"
   - ""
@@ -43,6 +43,18 @@ rules:
   - get
   - update
   - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+  - "argoproj.io"
+  - ""
+  resources:
+  - rollouts
+  verbs:
+  - list
+  - get
+  - update
+  - patch
+{{- end }}
 - apiGroups:
   - "apps"
````
````diff
@@ -7,6 +7,9 @@ imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
 metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.serviceAccount.annotations }}
+{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
+{{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.serviceAccount.labels }}
````
````diff
@@ -9,6 +9,7 @@ kubernetes:
   host: https://kubernetes.default
 
 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
@@ -51,10 +52,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.77
+      version: v0.0.95
     image:
       name: stakater/reloader
-      tag: v0.0.78
+      tag: v0.0.95
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -77,6 +78,7 @@ reloader:
     resources: {}
     pod:
       annotations: {}
+    priorityClassName: ""
 
   service: {}
     # labels: {}
@@ -91,9 +93,10 @@ reloader:
     # Specifies whether a ServiceAccount should be created
     create: true
     labels: {}
+    annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
-    name:
+    name:
@@ -111,4 +114,15 @@ reloader:
     # labels:
     # Set timeout for scrape
     # timeout: 10s
+
+  podMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
````
deployments/kubernetes/manifests/podmonitor.yaml (new file, 3 lines)
````diff
@@ -0,0 +1,3 @@
+---
+# Source: reloader/templates/podmonitor.yaml
+
````
````diff
@@ -1,19 +1,34 @@
 ---
+# Source: reloader/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "stakater"
+  labels:
+    app: stakater-reloader
+    chart: "reloader-v0.0.95"
+    release: "stakater"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
+  name: stakater-reloader
+---
 # Source: reloader/templates/clusterrole.yaml
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   annotations:
     meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
+    meta.helm.sh/release-name: "stakater"
   labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
-  name: reloader-reloader-role
+    app: stakater-reloader
+    chart: "reloader-v0.0.95"
+    release: "stakater"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
+  name: stakater-reloader-role
   namespace: default
 rules:
   - apiGroups:
@@ -46,33 +61,31 @@ rules:
       - get
       - update
       - patch
 
 ---
 # Source: reloader/templates/clusterrolebinding.yaml
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   annotations:
     meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
+    meta.helm.sh/release-name: "stakater"
   labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
-  name: reloader-reloader-role-binding
+    app: stakater-reloader
+    chart: "reloader-v0.0.95"
+    release: "stakater"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
+  name: stakater-reloader-role-binding
   namespace: default
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: reloader-reloader-role
+  name: stakater-reloader-role
 subjects:
   - kind: ServiceAccount
-    name: reloader-reloader
+    name: stakater-reloader
     namespace: default
@@ -80,42 +93,40 @@ kind: Deployment
 metadata:
   annotations:
     meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
+    meta.helm.sh/release-name: "stakater"
   labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    app: stakater-reloader
+    chart: "reloader-v0.0.95"
+    release: "stakater"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.77
-
-  name: reloader-reloader
+    version: v0.0.95
+  name: stakater-reloader
 spec:
   replicas: 1
   revisionHistoryLimit: 2
   selector:
     matchLabels:
-      app: reloader-reloader
-      release: "reloader"
+      app: stakater-reloader
+      release: "stakater"
   template:
     metadata:
       labels:
-        app: reloader-reloader
-        chart: "reloader-v0.0.77"
-        release: "reloader"
-        heritage: "Tiller"
-        app.kubernetes.io/managed-by: "Tiller"
+        app: stakater-reloader
+        chart: "reloader-v0.0.95"
+        release: "stakater"
+        heritage: "Helm"
+        app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.77
-
+        version: v0.0.95
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.77"
+      - image: "stakater/reloader:v0.0.95"
         imagePullPolicy: IfNotPresent
-        name: reloader-reloader
+        name: stakater-reloader
 
         ports:
         - name: http
@@ -131,39 +142,4 @@ spec:
         securityContext:
           runAsNonRoot: true
           runAsUser: 65534
 
-      serviceAccountName: reloader-reloader
-
----
-# Source: reloader/templates/role.yaml
-
----
-# Source: reloader/templates/rolebinding.yaml
-
----
-# Source: reloader/templates/service.yaml
-
----
-# Source: reloader/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  annotations:
-    meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
-  labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
-  name: reloader-reloader
-
----
-# Source: reloader/templates/servicemonitor.yaml
-
+      serviceAccountName: stakater-reloader
````
````diff
@@ -9,6 +9,7 @@ kubernetes:
   host: https://kubernetes.default
 
 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
@@ -91,6 +92,7 @@ reloader:
     # Specifies whether a ServiceAccount should be created
     create: true
     labels: {}
+    annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
     name:
@@ -112,3 +114,15 @@ reloader:
     # labels:
     # Set timeout for scrape
     # timeout: 10s
+
+  podMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
````
````diff
@@ -37,7 +37,7 @@ metadata:
 ```
 <small>*the default annotation can be changed with the `--secret-annotation` flag</small>
 
-Above mentioned annotation are also work for `Daemonsets` and `Statefulsets`
+Above mentioned annotation are also work for `Daemonsets` `Statefulsets` and `Rollouts`
 
 ## How Rolling upgrade works?
````
````diff
@@ -5,3 +5,4 @@ These are the key features of Reloader:
 1. Restart pod in a deployment on change in linked/related configmap's or secret's
 2. Restart pod in a daemonset on change in linked/related configmap's or secret's
 3. Restart pod in a statefulset on change in linked/related configmap's or secret's
+4. Restart pod in a rollout on change in linked/related configmap's or secret's
````
go.mod (52 lines changed)
````diff
@@ -3,24 +3,42 @@ module github.com/stakater/Reloader
 go 1.15
 
 require (
-	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/onsi/ginkgo v1.10.2 // indirect
-	github.com/onsi/gomega v1.7.0 // indirect
-	github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
-	github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
-	github.com/prometheus/client_golang v1.4.1
-	github.com/sirupsen/logrus v1.4.2
-	github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
-	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
-	k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
-	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
+	github.com/argoproj/argo-rollouts v1.0.1
+	github.com/onsi/ginkgo v1.15.1 // indirect
+	github.com/onsi/gomega v1.11.0 // indirect
+	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
+	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
+	github.com/prometheus/client_golang v1.10.0
+	github.com/sirupsen/logrus v1.7.0
+	github.com/spf13/cobra v1.1.3
+	k8s.io/api v0.21.1
+	k8s.io/apimachinery v0.21.1
+	k8s.io/client-go v0.21.1
 )
 
 replace (
-	github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
-	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
-	k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
-	k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
+	k8s.io/api => k8s.io/api v0.20.4
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
+	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
+	k8s.io/apiserver => k8s.io/apiserver v0.20.4
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
+	k8s.io/client-go => k8s.io/client-go v0.20.4
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
+	k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
+	k8s.io/component-base => k8s.io/component-base v0.20.4
+	k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
+	k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
+	k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
+	k8s.io/kubectl => k8s.io/kubectl v0.20.4
+	k8s.io/kubelet => k8s.io/kubelet v0.20.4
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
+	k8s.io/metrics => k8s.io/metrics v0.20.4
+	k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
 )
````
````diff
@@ -1,6 +1,7 @@
 package callbacks
 
 import (
+	"context"
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
@@ -8,6 +9,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
 	openshiftv1 "github.com/openshift/api/apps/v1"
 )
@@ -46,7 +48,7 @@ type RollingUpgradeFuncs struct {
 
 // GetDeploymentItems returns the deployments in given namespace
 func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
-	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
+	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list deployments %v", err)
 	}
@@ -55,7 +57,7 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
 
 // GetDaemonSetItems returns the daemonSets in given namespace
 func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
-	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
+	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list daemonSets %v", err)
 	}
@@ -64,7 +66,7 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
 
 // GetStatefulSetItems returns the statefulSets in given namespace
 func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
-	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
+	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list statefulSets %v", err)
 	}
@@ -73,13 +75,22 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
 
 // GetDeploymentConfigItems returns the deploymentConfigs in given namespace
 func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
-	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
+	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
 	if err != nil {
 		logrus.Errorf("Failed to list deploymentConfigs %v", err)
 	}
 	return util.InterfaceSlice(deploymentConfigs.Items)
 }
 
+// GetRolloutItems returns the rollouts in given namespace
+func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
+	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
+	if err != nil {
+		logrus.Errorf("Failed to list Rollouts %v", err)
+	}
+	return util.InterfaceSlice(rollouts.Items)
+}
+
 // GetDeploymentAnnotations returns the annotations of given deployment
 func GetDeploymentAnnotations(item interface{}) map[string]string {
 	return item.(appsv1.Deployment).ObjectMeta.Annotations
@@ -100,6 +111,11 @@ func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
 	return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
 }
 
+// GetRolloutAnnotations returns the annotations of given rollout
+func GetRolloutAnnotations(item interface{}) map[string]string {
+	return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
+}
+
 // GetDeploymentPodAnnotations returns the pod's annotations of given deployment
 func GetDeploymentPodAnnotations(item interface{}) map[string]string {
 	return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
@@ -120,6 +136,11 @@ func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
 	return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
 }
 
+// GetRolloutPodAnnotations returns the pod's annotations of given rollout
+func GetRolloutPodAnnotations(item interface{}) map[string]string {
+	return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
+}
+
 // GetDeploymentContainers returns the containers of given deployment
 func GetDeploymentContainers(item interface{}) []v1.Container {
 	return item.(appsv1.Deployment).Spec.Template.Spec.Containers
@@ -140,6 +161,11 @@ func GetDeploymentConfigContainers(item interface{}) []v1.Container {
 	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
 }
 
+// GetRolloutContainers returns the containers of given rollout
+func GetRolloutContainers(item interface{}) []v1.Container {
+	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
+}
+
 // GetDeploymentInitContainers returns the containers of given deployment
 func GetDeploymentInitContainers(item interface{}) []v1.Container {
 	return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
@@ -160,31 +186,46 @@ func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
 	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
 }
 
+// GetRolloutInitContainers returns the containers of given rollout
+func GetRolloutInitContainers(item interface{}) []v1.Container {
+	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
+}
+
 // UpdateDeployment performs rolling upgrade on deployment
 func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
 	deployment := resource.(appsv1.Deployment)
-	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
+	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }
 
 // UpdateDaemonSet performs rolling upgrade on daemonSet
 func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
 	daemonSet := resource.(appsv1.DaemonSet)
-	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
+	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }
 
 // UpdateStatefulSet performs rolling upgrade on statefulSet
 func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
 	statefulSet := resource.(appsv1.StatefulSet)
-	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
+	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }
 
 // UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
 func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
 	deploymentConfig := resource.(openshiftv1.DeploymentConfig)
-	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
+	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
 	return err
 }
 
+// UpdateRollout performs rolling upgrade on rollout
+func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
+	rollout := resource.(argorolloutv1alpha1.Rollout)
+	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
+	logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
+	logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
+	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
+	return err
+}
+
@@ -207,3 +248,8 @@ func GetStatefulSetVolumes(item interface{}) []v1.Volume {
 func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
 	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
 }
+
+// GetRolloutVolumes returns the Volumes of given rollout
+func GetRolloutVolumes(item interface{}) []v1.Volume {
+	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
+}
````
````diff
@@ -32,6 +32,7 @@ func NewReloaderCommand() *cobra.Command {
 	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
 	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
 	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
+	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
 	return cmd
 }
````
````diff
@@ -145,7 +145,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
 
 	// This controller retries 5 times if something goes wrong. After that, it stops trying.
 	if c.queue.NumRequeues(key) < 5 {
-		logrus.Errorf("Error syncing events %v: %v", key, err)
+		logrus.Errorf("Error syncing events: %v", err)
 
 		// Re-enqueue the key rate limited. Based on the rate limiter on the
 		// queue and the re-enqueue history, the key will be processed later again.
````
````diff
@@ -20,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
 	} else {
 		config, _ := r.GetConfig()
 		// process resource based on its type
-		doRollingUpgrade(config, r.Collectors)
+		return doRollingUpgrade(config, r.Collectors)
 	}
 	return nil
 }
````
````diff
@@ -22,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
 		config, oldSHAData := r.GetConfig()
 		if config.SHAValue != oldSHAData {
 			// process resource based on its type
-			doRollingUpgrade(config, r.Collectors)
+			return doRollingUpgrade(config, r.Collectors)
 		}
 	}
 	return nil
````
````diff
@@ -71,30 +71,66 @@ func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	}
 }
 
-func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
-	clients := kube.GetClients()
-
-	rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
-	rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
-	rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
-
-	if kube.IsOpenshift {
-		rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
+// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
+func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
+	return callbacks.RollingUpgradeFuncs{
+		ItemsFunc:          callbacks.GetRolloutItems,
+		AnnotationsFunc:    callbacks.GetRolloutAnnotations,
+		PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
+		ContainersFunc:     callbacks.GetRolloutContainers,
+		InitContainersFunc: callbacks.GetRolloutInitContainers,
+		UpdateFunc:         callbacks.UpdateRollout,
+		VolumesFunc:        callbacks.GetRolloutVolumes,
+		ResourceType:       "Rollout",
 	}
 }
 
-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
+func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
+	clients := kube.GetClients()
+
+	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+
+	if kube.IsOpenshift {
+		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
+		if err != nil {
+			return err
+		}
+	}
+
+	if options.IsArgoRollouts == "true" {
+		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
 
 	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
 	if err != nil {
 		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
 	}
+	return err
 }
 
 // PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
 func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
 	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
 	var err error
 
 	for _, i := range items {
 		// find correct annotation and update the resource
 		annotations := upgradeFuncs.AnnotationsFunc(i)
@@ -139,6 +175,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 		if err != nil {
 			logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
 			collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+			return err
 		} else {
 			logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
 			logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
@@ -146,7 +183,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 		}
 	}
-	return err
+	return nil
 }
 
 func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
````
````diff
@@ -1,6 +1,7 @@
 package handler
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"testing"
@@ -721,7 +722,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t
 		t.Errorf("Failed to create deployment with search annotation.")
 	}
 	defer func() {
-		_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
+		_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
 	}()
 	// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
````
````diff
@@ -17,4 +17,6 @@
 	SearchMatchAnnotation = "reloader.stakater.com/match"
 	// LogFormat is the log format to use (json, or empty string for default)
 	LogFormat = ""
+	// Adds support for argo rollouts
+	IsArgoRollouts = "false"
 )
````
````diff
@@ -1,6 +1,7 @@
 package testutil
 
 import (
+	"context"
 	"math/rand"
 	"sort"
 	"strconv"
@@ -33,7 +34,7 @@ var (
 
 // CreateNamespace creates namespace for testing
 func CreateNamespace(namespace string, client kubernetes.Interface) {
-	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
+	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
 	if err != nil {
 		logrus.Fatalf("Failed to create namespace for testing %v", err)
 	} else {
@@ -43,7 +44,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
 
 // DeleteNamespace deletes namespace for testing
 func DeleteNamespace(namespace string, client kubernetes.Interface) {
-	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
+	err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
 	if err != nil {
 		logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
 	} else {
@@ -597,7 +598,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
 func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
 	logrus.Infof("Creating configmap")
 	configmapClient := client.CoreV1().ConfigMaps(namespace)
-	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
+	_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return configmapClient, err
 }
@@ -606,7 +607,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
 func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
 	logrus.Infof("Creating secret")
 	secretClient := client.CoreV1().Secrets(namespace)
-	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
+	_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return secretClient, err
 }
@@ -621,7 +622,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
 	} else {
 		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
 	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return deployment, err
 }
@@ -636,7 +637,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
 	} else {
 		deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
 	}
-	deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
+	deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
 	time.Sleep(5 * time.Second)
 	return deploymentConfig, err
 }
@@ -651,7 +652,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
 	} else {
 		deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
 	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return deployment, err
 }
@@ -661,7 +662,7 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
 	logrus.Infof("Creating Deployment")
 	deploymentClient := client.AppsV1().Deployments(namespace)
 	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return deployment, err
@@ -672,7 +673,7 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
 	logrus.Infof("Creating Deployment")
 	deploymentClient := client.AppsV1().Deployments(namespace)
 	deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return deployment, err
 }
@@ -684,7 +685,7 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
 	deploymentClient := client.AppsV1().Deployments(namespace)
 	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
 	deploymentObj.Annotations = annotations
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return deployment, err
 }
@@ -699,7 +700,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
 	} else {
 		daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
 	}
-	daemonset, err := daemonsetClient.Create(daemonsetObj)
+	daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return daemonset, err
 }
@@ -714,7 +715,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
 	} else {
 		statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
 	}
-	statefulset, err := statefulsetClient.Create(statefulsetObj)
+	statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
 	time.Sleep(3 * time.Second)
 	return statefulset, err
 }
@@ -722,7 +723,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
 // DeleteDeployment creates a deployment in given namespace and returns the error if any
 func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
 	logrus.Infof("Deleting Deployment")
-	deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
+	deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return deploymentError
 }
@@ -730,7 +731,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
 // DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
 func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
 	logrus.Infof("Deleting DeploymentConfig")
-	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
+	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return deploymentConfigError
 }
@@ -738,7 +739,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
 // DeleteDaemonSet creates a daemonset in given namespace and returns the error if any
 func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
 	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
-	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
+	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return daemonsetError
 }
@@ -746,7 +747,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
 // DeleteStatefulSet creates a statefulset in given namespace and returns the error if any
 func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
 	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
-	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
+	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return statefulsetError
 }
@@ -760,7 +761,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
 	} else {
 		configmap = GetConfigmap(namespace, configmapName, data)
 	}
-	_, updateErr := configmapClient.Update(configmap)
+	_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
 	time.Sleep(3 * time.Second)
 	return updateErr
 }
@@ -774,7 +775,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
 	} else {
 		secret = GetSecret(namespace, secretName, data)
 	}
-	_, updateErr := secretClient.Update(secret)
+	_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
 	time.Sleep(3 * time.Second)
 	return updateErr
 }
@@ -782,7 +783,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
 // DeleteConfigMap deletes a configmap in given namespace and returns the error if any
 func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
 	logrus.Infof("Deleting configmap %q.\n", configmapName)
-	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
+	err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return err
 }
@@ -790,7 +791,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
 // DeleteSecret deletes a secret in given namespace and returns the error if any
 func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
 	logrus.Infof("Deleting secret %q.\n", secretName)
-	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
+	err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
 	time.Sleep(3 * time.Second)
 	return err
 }
````
````diff
@@ -1,10 +1,12 @@
 package kube
 
 import (
+	"context"
 	"os"
 
 	"k8s.io/client-go/tools/clientcmd"
 
+	argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
 	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
 	"github.com/sirupsen/logrus"
 	"k8s.io/client-go/kubernetes"
@@ -15,6 +17,7 @@ import (
 type Clients struct {
 	KubernetesClient    kubernetes.Interface
 	OpenshiftAppsClient appsclient.Interface
+	ArgoRolloutClient   argorollout.Interface
 }
 
 var (
@@ -38,18 +41,34 @@ func GetClients() Clients {
 		}
 	}
 
+	var rolloutClient *argorollout.Clientset
+
+	rolloutClient, err = GetArgoRolloutClient()
+	if err != nil {
+		logrus.Warnf("Unable to create ArgoRollout client error = %v", err)
+	}
+
 	return Clients{
 		KubernetesClient:    client,
 		OpenshiftAppsClient: appsClient,
+		ArgoRolloutClient:   rolloutClient,
 	}
 }
 
+func GetArgoRolloutClient() (*argorollout.Clientset, error) {
+	config, err := getConfig()
+	if err != nil {
+		return nil, err
+	}
+	return argorollout.NewForConfig(config)
+}
+
 func isOpenshift() bool {
 	client, err := GetKubernetesClient()
 	if err != nil {
 		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
 	}
-	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
+	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
 	if err == nil {
 		logrus.Info("Environment: Openshift")
 		return true
````