Compare commits

...

75 Commits

Author SHA1 Message Date
stakater-user
f2b4e8e6c6 [skip-ci] Update artifacts 2021-07-30 06:35:22 +00:00
Ahmed Waleed Malik
99a38bff8e Merge pull request #254 from stakater/handle-resource-redeploy
fix: Reload pods after redeploy secrets or configmap
2021-07-30 11:20:35 +05:00
Ahmed Waleed Malik
d0aa627715 Merge pull request #253 from stakater/remove-unused-files
[skip-ci] Remove unused files
2021-07-30 10:56:58 +05:00
Waleed Malik
953cbe9d28 Reload resource if secret/configmap is re-created 2021-07-30 10:24:50 +05:00
Waleed Malik
f7873aba7b Update dependencies 2021-07-29 15:24:41 +05:00
Waleed Malik
f9728ecfff Add run target in Makefile 2021-07-29 15:24:30 +05:00
Waleed Malik
96a44153de Non-root user should be specified in numeric form in Dockerfile 2021-07-29 15:24:09 +05:00
Waleed Malik
cafbcbd2cb Update build image target 2021-07-29 14:56:43 +05:00
Waleed Malik
6397a35e32 Remove unused files 2021-07-29 14:33:04 +05:00
Waleed Malik
aea8592880 Update golangci-lint step in workflows 2021-07-29 14:32:56 +05:00
stakater-user
2aa514a34c [skip-ci] Update artifacts 2021-07-28 10:36:56 +00:00
Faizan Ahmad
ac39bc4eba Merge pull request #251 from aslafy-z/patch-1
docs(helm): podmonitor does not need service
2021-07-28 12:20:07 +02:00
Zadkiel
284d21686e docs(helm): podmonitor does not need service 2021-07-20 17:42:59 +02:00
stakater-user
00c0c11c76 [skip-ci] Update artifacts 2021-07-11 07:51:39 +00:00
Brandon Clifford
96ebfa8e62 Fix typo in Chart.yaml sources (#248) 2021-07-11 09:37:32 +02:00
stakater-user
95d442d80f [skip-ci] Update artifacts 2021-07-09 07:43:58 +00:00
Faizan Ahmad
e4e58882ab Merge pull request #246 from gciria/add-chart-liveness-readiness
Add Liveness and readiness probe timeout values
2021-07-09 09:29:41 +02:00
Gustavo Ciria
ea71fc0eec Create Chart.yaml 2021-07-08 13:07:43 -03:00
Gustavo Ciria
462b225d92 Delete Chart.yaml
Version and appVersion do not need to be updated manually.
2021-07-08 12:55:03 -03:00
Gustavo Ciria
d8728092f8 Add Liveness and readiness probe timeout values 2021-07-08 11:44:49 -03:00
stakater-user
2c8ef70c43 [skip-ci] Update artifacts 2021-06-28 14:19:33 +00:00
Faizan Ahmad
4d2c8a451e Merge pull request #243 from sfynx/master
Only enable Rollouts when enabled in Helm chart.
2021-06-28 16:03:38 +02:00
Henno Schooljan
f7927c85b1 Disable OpenShift by default, add notes in README. 2021-06-25 21:28:16 +02:00
Henno Schooljan
2e2fd2a11b Only enable Rollouts when enabled in Helm chart.
This prevents a permission issue in case Rollouts is available on a cluster, but the user does not have permission to use it (e.g. as a tenant on a cluster without cluster admin rights), and therefore also may not set permissions for it.

See issue #231.
2021-06-16 20:46:51 +02:00
stakater-user
0e6ec1d36b [skip-ci] Update artifacts 2021-06-15 17:40:07 +00:00
Faizan Ahmad
85b33d9104 Merge pull request #242 from stakater/actions-update
Added helm template step in push action
2021-06-15 19:25:54 +02:00
talha0324
c838ecbbc7 Updated command to one line 2021-06-15 19:52:28 +05:00
stakater-user
068a5c1e64 [skip-ci] Update artifacts 2021-06-15 14:13:35 +00:00
Faizan Ahmad
4d559a1864 Merge pull request #240 from stakater/file-name-fix
Fixed file name
2021-06-15 15:57:30 +02:00
talha0324
322142dd66 syntax fix 2021-06-15 18:24:56 +05:00
talha0324
39f37b706c Added helm template step in push action 2021-06-15 18:23:43 +05:00
talha0324
4e10dd4f80 Merge branch 'master' into file-name-fix
merging master into this branch
2021-06-15 17:58:49 +05:00
stakater-user
ccaa600ff4 [skip-ci] Update artifacts 2021-06-15 11:42:02 +00:00
Faizan Ahmad
a3fcfeb62f Merge pull request #241 from stakater/actions-fix
Fixed helm version tag and PR message
2021-06-15 13:28:07 +02:00
talha0324
d2cbbafeb1 Fixed helm version tag and PR message 2021-06-15 16:07:49 +05:00
talha0324
eaf8e16414 Fixed file name 2021-06-15 15:48:52 +05:00
stakater-user
5a65cf9f6d [skip-ci] Update artifacts 2021-06-13 19:48:18 +00:00
Faizan Ahmad
a8a68ae1b0 Merge pull request #236 from tete17/Update-dependencies-for-Argo-Rollouts
Update ArgoCD Rollouts to 1.0.1 to fix a compatibility issue
2021-06-13 21:33:59 +02:00
tete17
7643a27fb1 Upgrade argo-rollouts to v1.0.1 2021-06-04 18:32:02 +02:00
tete17
71fdb53c2e Update ArgoCD Rollouts to 0.10.2 to fix a compatibility issue and update necessary k8s machinery 2021-05-31 15:35:51 +02:00
stakater-user
d6312f6f83 [skip-ci] Update artifacts 2021-05-25 03:26:20 +00:00
Ahmed Waleed Malik
19220f5e6e Merge pull request #235 from phillebaba/feature/chart-priority-class
Add priority class name to helm chart
2021-05-25 08:13:08 +05:00
Philip Laine
05456b0905 Remove priority class name value 2021-05-24 21:19:20 +02:00
Philip Laine
10328dee8d Update deployments/kubernetes/chart/reloader/templates/deployment.yaml
Co-authored-by: Ahmed Waleed Malik <ahmedwaleedmalik@gmail.com>
2021-05-24 21:18:42 +02:00
Philip Laine
fd174ed691 Add priority class name to helm chart 2021-05-19 22:06:35 +02:00
stakater-user
2e47f1740c [skip-ci] Update artifacts 2021-04-26 04:52:02 +00:00
Ahmed Waleed Malik
15cb96f945 Merge pull request #228 from stakater/fix-issue-221
Add Optional pod monitor
2021-04-26 09:38:29 +05:00
faizanahmad055
1e987db54d Add endline in podmonitor.yaml
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:10:23 +02:00
faizanahmad055
12a7fed3ae Add endline in values.yaml.tmpl
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:09:40 +02:00
stakater-user
f18fac66c2 [skip-ci] Update artifacts 2021-04-25 21:05:39 +00:00
faizanahmad055
b5c95f9cbf Add Optional pod monitor
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:05:36 +02:00
Faizan Ahmad
46b948388f Merge pull request #226 from tenstad/propagate-error-to-fix-retry
Propagate error to enable retry
2021-04-25 22:52:06 +02:00
Amund Tenstad
78be58b090 Do not log content of secrets 2021-04-21 16:12:53 +02:00
Amund Tenstad
54a8e0683b Propagate PerformRollingUpgrade error to Handle 2021-04-21 13:25:08 +02:00
stakater-user
702f0caa93 [skip-ci] Update artifacts 2021-04-11 18:21:36 +00:00
Faizan Ahmad
2e709e85ae Merge pull request #223 from sfynx/master
fix: read isArgoRollouts correctly in Helm chart
2021-04-11 20:08:10 +02:00
Henno Schooljan
debfd57a91 fix: read isArgoRollouts correctly in Helm chart 2021-04-07 23:21:41 +02:00
stakater-user
c3b8af34ac [skip-ci] Update artifacts 2021-03-22 13:47:07 +00:00
Ahmed Waleed Malik
7a65bcb35b Merge pull request #218 from stakater/fix-issue-207
Make argo rollouts optional
2021-03-22 18:32:15 +05:00
faizanahmad055
af6cd9e37c Add isArgoRollouts in values.yaml.tmpl
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-03-22 11:34:47 +01:00
faizanahmad055
344004d0b3 Make argo rollouts optional
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-03-22 11:30:22 +01:00
stakater-user
a5bc586f09 [skip-ci] Update artifacts 2021-03-17 04:30:18 +00:00
Ahmed Waleed Malik
81ca7ab601 Merge pull request #212 from corco/multi_arch_dockerfile
Dockerfile now use the platform supplied by buildx
2021-03-17 09:15:39 +05:00
Jonathan Drolet
69c9ccb2ea Merge remote-tracking branch 'stakater/master' into multi_arch_dockerfile 2021-03-16 09:05:32 -04:00
Ahmed Waleed Malik
0ec3effab8 Merge pull request #215 from stakater/use-commit-hash
Update pull_request.yaml
2021-03-16 16:26:17 +05:00
Ahmed Waleed Malik
dba42e91bc Update pull_request.yaml 2021-03-16 16:20:16 +05:00
Jonathan Drolet
68fd3bebe5 Dockerfile now use the platform supplied by buildx 2021-03-15 21:11:18 -04:00
Ahmed Waleed Malik
52b975ef0d Merge pull request #211 from stakater/update-pr-workflow
Update pull_request.yaml
2021-03-15 20:38:18 +05:00
Ahmed Waleed Malik
0679af76f4 Update pull_request.yaml 2021-03-15 20:23:53 +05:00
Ahmed Waleed Malik
309c10f632 Merge pull request #206 from stakater/update-modules
Update modules
2021-03-10 09:49:27 +05:00
Waleed Malik
07ddec9fd1 Clean up unused dependencies 2021-03-10 09:19:23 +05:00
Waleed Malik
69a80fd1d9 Update modules 2021-03-10 09:18:51 +05:00
stakater-user
04975de060 [skip-ci] Update artifacts 2021-03-09 19:20:58 +00:00
Ahmed Waleed Malik
459a808371 Merge pull request #205 from tete17/Support-Rollouts-on-Helm-deployment
Allow reloader to modify rollouts when installed through helm
2021-03-09 23:50:42 +05:00
tete17
ef8a335c93 Allow reloader to modify rollouts when installed through helm 2021-03-09 20:19:33 +01:00
29 changed files with 2386 additions and 359 deletions

View File

@@ -19,7 +19,9 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v2
with:
ref: ${{github.event.pull_request.head.sha}}
# Setting up helm binary
- name: Set up Helm
uses: azure/setup-helm@v1
@@ -34,10 +36,12 @@ jobs:
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
version: v1.33
only-new-issues: false
args: --timeout 10m
- name: Helm Lint
run: |
@@ -111,7 +115,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
allow-repeats: false
- name: Notify Failure
@@ -131,4 +135,4 @@ jobs:
fields: repo,author,action,eventName,ref,workflow
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

View File

@@ -39,11 +39,13 @@ jobs:
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
version: v1.33
only-new-issues: false
args: --timeout 10m
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -127,6 +129,10 @@ jobs:
VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
run: make bump-chart
- name: Helm Template
run: |
helm template stakater deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
# Publish helm chart
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master

View File

@@ -1,5 +1,8 @@
# Build the manager binary
FROM golang:1.15.2 as builder
FROM --platform=${BUILDPLATFORM} golang:1.16 as builder
ARG TARGETOS
ARG TARGETARCH
WORKDIR /workspace
@@ -16,14 +19,14 @@ COPY internal/ internal/
COPY pkg/ pkg/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
USER 65532:65532
# Port for metrics and probes
EXPOSE 9090

View File

@@ -1,6 +1,6 @@
# note: call scripts from /scripts
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
.PHONY: default build build-image test stop push apply deploy release release-all manifest push
OS ?= linux
ARCH ?= ???
@@ -9,12 +9,12 @@ ALL_ARCH ?= arm64 arm amd64
BUILDER ?= reloader-builder-${ARCH}
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
# Default value "dev"
TAG ?= v0.0.75.0
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
# Default value "dev"
VERSION ?= 0.0.1
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
BUILD=
GOCMD = go
@@ -26,23 +26,19 @@ default: build test
install:
"$(GOCMD)" mod download
run:
go run ./main.go
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
builder-image:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
reloader-${ARCH}.tar:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
binary-image: builder-image
cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
build-image:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${REPOSITORY_ARCH}" --load -f Dockerfile .
push:
docker push ${REPOSITORY_ARCH}
release: binary-image push manifest
release: build-image push manifest
release-all:
-rm -rf ~/.docker/manifests/*
@@ -66,23 +62,6 @@ test:
stop:
@docker stop "${BINARY}"
clean-images: stop
-docker rmi "${BINARY}"
@for arch in $(ALL_ARCH) ; do \
echo Clean image: $$arch ; \
make clean-image ARCH=$$arch ; \
done
-docker rmi "${REPOSITORY_GENERIC}"
clean-image:
-docker rmi "${BUILDER}"
-docker rmi "${REPOSITORY_ARCH}"
-rm -rf ~/.docker/manifests/*
clean:
"$(GOCMD)" clean -i
-rm -rf reloader-*.tar
apply:
kubectl apply -f deployments/manifests/ -n temp-reloader
@@ -93,3 +72,4 @@ bump-chart:
sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml

View File

@@ -219,7 +219,14 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
You can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the values.yaml file. The service monitor will be removed in future Reloader releases in favour of the pod monitor.
**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
| Parameter | Description | Type |
| ---------------- | ---------------------------------------------------------------------------- | ------- |
| isOpenshift | Enable OpenShift DeploymentConfigs. Valid values are `true` or `false` | boolean |
| isArgoRollouts | Enable Argo Rollouts. Valid values are `true` or `false` | boolean |
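For example, a minimal values.yaml sketch (a hypothetical excerpt, using the `reloader.isOpenshift` and `reloader.isArgoRollouts` keys from the chart values shown in the diffs below) that enables both:
```yaml
# Sketch only: enable optional reloading of OpenShift DeploymentConfigs
# and Argo Rollouts through the Helm chart values.
reloader:
  isOpenshift: true     # cluster must also expose apps.openshift.io/v1
  isArgoRollouts: true  # cluster must also expose argoproj.io/v1alpha1
```
The ClusterRole templates in the chart only add the corresponding RBAC rules when both the API group is present on the cluster and the matching flag is set to `true`.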
## Help

View File

@@ -1,26 +0,0 @@
FROM golang:1.15.2-alpine
LABEL maintainer "Stakater Team"
ARG GOARCH=amd64
RUN apk -v --update \
--no-cache \
add git build-base
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /
# Running this image produces a tarball suitable to be piped into another
# Docker build command.
CMD tar -cf - -C / Dockerfile.run Reloader

View File

@@ -1,14 +0,0 @@
FROM alpine:3.11
LABEL maintainer "Stakater Team"
RUN apk add --update --no-cache ca-certificates
COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/bin/Reloader"]

View File

@@ -3,14 +3,14 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.83
appVersion: v0.0.83
version: v0.0.99
appVersion: v0.0.99
keywords:
- Reloader
- kubernetes
home: https://github.com/stakater/Reloader
sources:
- https://github.com/stakater/IngressMonitorController
- https://github.com/stakater/Reloader
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater

View File

@@ -32,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -43,6 +43,18 @@ rules:
- get
- update
- patch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""
resources:
- rollouts
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "apps"

View File

@@ -51,6 +51,9 @@ spec:
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
containers:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
@@ -97,17 +100,25 @@ spec:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -144,6 +155,9 @@ spec:
- "{{ .Values.reloader.custom_annotations.match }}"
{{- end }}
{{- end }}
{{- if eq .Values.reloader.isArgoRollouts true }}
- "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:

View File

@@ -0,0 +1,31 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
namespace: {{ .Values.reloader.podMonitor.namespace }}
{{- end }}
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -32,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -43,6 +43,18 @@ rules:
- get
- update
- patch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""
resources:
- rollouts
verbs:
- list
- get
- update
- patch
{{- end }}
- apiGroups:
- "apps"

View File

@@ -9,6 +9,7 @@ kubernetes:
host: https://kubernetes.default
reloader:
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
@@ -51,10 +52,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.77
version: v0.0.99
image:
name: stakater/reloader
tag: v0.0.83
tag: v0.0.99
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -65,6 +66,18 @@ reloader:
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# Liveness and readiness probe timeout values.
livenessProbe: {}
# timeoutSeconds: 5
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
readinessProbe: {}
# timeoutSeconds: 15
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
# Specify resource requests/limits for the deployment.
# Example:
# resources:
@@ -77,6 +90,7 @@ reloader:
resources: {}
pod:
annotations: {}
priorityClassName: ""
service: {}
# labels: {}
@@ -94,15 +108,17 @@ reloader:
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
# Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
# Enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
@@ -112,4 +128,14 @@ reloader:
# labels:
# Set timeout for scrape
# timeout: 10s
podMonitor:
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

View File

@@ -0,0 +1,3 @@
---
# Source: reloader/templates/podmonitor.yaml

View File

@@ -1,19 +1,34 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "stakater"
labels:
app: stakater-reloader
chart: "reloader-v0.0.99"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
meta.helm.sh/release-name: "stakater"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role
app: stakater-reloader
chart: "reloader-v0.0.99"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader-role
namespace: default
rules:
- apiGroups:
@@ -46,33 +61,31 @@ rules:
- get
- update
- patch
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
meta.helm.sh/release-name: "stakater"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader-role-binding
app: stakater-reloader
chart: "reloader-v0.0.99"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: stakater-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-reloader-role
name: stakater-reloader-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
name: stakater-reloader
namespace: default
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
@@ -80,42 +93,40 @@ kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
meta.helm.sh/release-name: "stakater"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
app: stakater-reloader
chart: "reloader-v0.0.99"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
name: reloader-reloader
version: v0.0.99
name: stakater-reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
app: stakater-reloader
release: "stakater"
template:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
app: stakater-reloader
chart: "reloader-v0.0.99"
release: "stakater"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v0.0.99
spec:
containers:
- image: "stakater/reloader:v0.0.77"
- image: "stakater/reloader:v0.0.99"
imagePullPolicy: IfNotPresent
name: reloader-reloader
name: stakater-reloader
ports:
- name: http
@@ -124,46 +135,19 @@ spec:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader
---
# Source: reloader/templates/role.yaml
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/service.yaml
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader
---
# Source: reloader/templates/servicemonitor.yaml
serviceAccountName: stakater-reloader

View File

@@ -9,6 +9,7 @@ kubernetes:
host: https://kubernetes.default
reloader:
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
@@ -113,3 +114,15 @@ reloader:
# Set timeout for scrape
# timeout: 10s
podMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

go.mod (62 lines changed)
View File

@@ -1,27 +1,49 @@
module github.com/stakater/Reloader
go 1.15
go 1.16
require (
github.com/argoproj/argo-rollouts v0.7.2
github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/onsi/ginkgo v1.10.2 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
github.com/prometheus/client_golang v1.4.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
github.com/argoproj/argo-rollouts v1.0.2
github.com/onsi/ginkgo v1.15.1 // indirect
github.com/onsi/gomega v1.11.0 // indirect
github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
github.com/prometheus/client_golang v1.10.0
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.3
k8s.io/api v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/client-go v0.21.2
)
// Replacements for argo-rollouts
replace (
github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
)
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
k8s.io/api => k8s.io/api v0.20.4
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
k8s.io/apiserver => k8s.io/apiserver v0.20.4
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
k8s.io/client-go => k8s.io/client-go v0.20.4
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
k8s.io/component-base => k8s.io/component-base v0.20.4
k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
k8s.io/kubectl => k8s.io/kubectl v0.20.4
k8s.io/kubelet => k8s.io/kubelet v0.20.4
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
k8s.io/metrics => k8s.io/metrics v0.20.4
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.4
k8s.io/sample-controller => k8s.io/sample-controller v0.20.4
)

go.sum (2138 lines changed)

File diff suppressed because it is too large.

View File

@@ -1,6 +1,7 @@
package callbacks
import (
"context"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -47,7 +48,7 @@ type RollingUpgradeFuncs struct {
// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deployments %v", err)
}
@@ -56,7 +57,7 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list daemonSets %v", err)
}
@@ -65,7 +66,7 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list statefulSets %v", err)
}
@@ -74,7 +75,7 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
}
@@ -83,7 +84,7 @@ func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interfac
// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(meta_v1.ListOptions{})
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list Rollouts %v", err)
}
@@ -193,38 +194,38 @@ func GetRolloutInitContainers(item interface{}) []v1.Container {
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
deployment := resource.(appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
daemonSet := resource.(appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
rollout := resource.(argorolloutv1alpha1.Rollout)
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(rollout.Name, meta_v1.GetOptions{})
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(&rollout)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}

View File

@@ -32,6 +32,7 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
return cmd
}

View File

@@ -30,6 +30,9 @@ type Controller struct {
collectors metrics.Collectors
}
// controllerInitialized flag determines whether the controller has been initialized
var controllerInitialized bool = false
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
@@ -57,8 +60,12 @@ func NewController(
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
// This causes the issue where reloader reloads the pods when reloader itself gets restarted as its queue is filled with all the k8s objects as new resources.
if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
})
}
}
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -111,6 +118,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
}
func (c *Controller) runWorker() {
// At this point the controller is fully initialized and we can start processing the resources
controllerInitialized = true
for c.processNextItem() {
}
}
@@ -145,7 +155,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
// This controller retries 5 times if something goes wrong. After that, it stops trying.
if c.queue.NumRequeues(key) < 5 {
logrus.Errorf("Error syncing events %v: %v", key, err)
logrus.Errorf("Error syncing events: %v", err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.

View File

@@ -20,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
} else {
config, _ := r.GetConfig()
// process resource based on its type
doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors)
}
return nil
}

View File

@@ -22,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
// process resource based on its type
doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors)
}
}
return nil

View File

@@ -85,32 +85,52 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
clients := kube.GetClients()
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
if kube.IsOpenshift {
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
if kube.IsOpenshift {
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
return nil
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
return err
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
var err error
for _, i := range items {
// find correct annotation and update the resource
annotations := upgradeFuncs.AnnotationsFunc(i)
@@ -155,6 +175,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
return err
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
@@ -162,7 +183,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
}
}
return err
return nil
}
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {

View File

@@ -1,6 +1,7 @@
package handler
import (
"context"
"fmt"
"os"
"testing"
@@ -721,7 +722,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})

View File

@@ -17,4 +17,6 @@ var (
SearchMatchAnnotation = "reloader.stakater.com/match"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
// Adds support for argo rollouts
IsArgoRollouts = "false"
)

View File

@@ -1,6 +1,7 @@
package testutil
import (
"context"
"math/rand"
"sort"
"strconv"
@@ -33,7 +34,7 @@ var (
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
if err != nil {
logrus.Fatalf("Failed to create namespace for testing %v", err)
} else {
@@ -43,7 +44,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
if err != nil {
logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
} else {
@@ -597,7 +598,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
logrus.Infof("Creating configmap")
configmapClient := client.CoreV1().ConfigMaps(namespace)
_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return configmapClient, err
}
@@ -606,7 +607,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
_, err := secretClient.Create(GetSecret(namespace, secretName, data))
_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return secretClient, err
}
@@ -621,7 +622,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -636,7 +637,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
time.Sleep(5 * time.Second)
return deploymentConfig, err
}
@@ -651,7 +652,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -661,7 +662,7 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
@@ -672,7 +673,7 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -684,7 +685,7 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
deployment, err := deploymentClient.Create(deploymentObj)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -699,7 +700,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
} else {
daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
}
daemonset, err := daemonsetClient.Create(daemonsetObj)
daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return daemonset, err
}
@@ -714,7 +715,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
} else {
statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
}
statefulset, err := statefulsetClient.Create(statefulsetObj)
statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return statefulset, err
}
@@ -722,7 +723,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment creates a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentError
}
@@ -730,7 +731,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
logrus.Infof("Deleting DeploymentConfig")
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentConfigError
}
@@ -738,7 +739,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return daemonsetError
}
@@ -746,7 +747,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet creates a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
logrus.Infof("Deleting StatefulSet %s", statefulsetName)
statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return statefulsetError
}
@@ -760,7 +761,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
} else {
configmap = GetConfigmap(namespace, configmapName, data)
}
_, updateErr := configmapClient.Update(configmap)
_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -774,7 +775,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
} else {
secret = GetSecret(namespace, secretName, data)
}
_, updateErr := secretClient.Update(secret)
_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -782,7 +783,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -790,7 +791,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
logrus.Infof("Deleting secret %q.\n", secretName)
err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}

View File

@@ -1,6 +1,7 @@
package kube
import (
"context"
"os"
"k8s.io/client-go/tools/clientcmd"
@@ -67,7 +68,7 @@ func isOpenshift() bool {
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true

View File

@@ -1,4 +0,0 @@
issues:
kind: 1
url: https://aurorasolutions.atlassian.net
project: STK