Compare commits

...

92 Commits

Author SHA1 Message Date
stakater-user
565612e421 [skip-ci] Update artifacts 2022-02-26 22:43:34 +00:00
Faizan Ahmad
31e247e3ae Merge pull request #291 from stakater/fix-reloading-on-create
Make reload on create optional
2022-02-26 23:22:54 +01:00
faizanahmad055
1e79b86f72 Update readme
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-20 00:00:04 +01:00
faizanahmad055
b5b684c67b Fix reload on create flag
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-19 23:23:33 +01:00
faizanahmad055
bbc6bd2dea Update go version
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-14 10:33:55 +01:00
faizanahmad055
61ce150d7c Make reload on create optional
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2022-02-13 19:53:49 +01:00
stakater-user
56e83ecde9 [skip-ci] Update artifacts 2022-02-13 12:16:49 +00:00
Faizan Ahmad
c33876508c Merge pull request #290 from ctschubel/master
add replicas config to helm-chart
2022-02-13 13:07:37 +01:00
ctschubel
55ea2e430e add replicas to values.yaml.tmpl 2022-02-10 19:33:29 +01:00
ctschubel
4beefc3f43 fix replica config value name in helm-chart 2022-02-10 16:05:03 +01:00
ctschubel
3b1d30141c add replicas config to helm-chart 2022-02-10 15:21:16 +01:00
stakater-user
766bc24241 [skip-ci] Update artifacts 2022-01-02 13:43:49 +00:00
Faizan Ahmad
8e3aad3b0e Merge pull request #280 from jsoref/issue-278
Add .Release.Namespace
2022-01-02 14:35:19 +01:00
Josh Soref
ce2866bf6a Add .Release.Namespace
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2022-01-02 01:46:17 -05:00
stakater-user
bcbaad8495 [skip-ci] Update artifacts 2021-11-08 21:27:34 +00:00
Faizan Ahmad
3346319082 Merge pull request #271 from stakater/fix-deployment-pipeline
Fix yaml error in deployment manifest
2021-11-08 22:17:38 +01:00
faizanahmad055
139aa43c1c Fix yaml error in deployment manifest
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-11-08 22:02:06 +01:00
Faizan Ahmad
11fdd40e41 Merge pull request #263 from Aenima4six2/aenima4six2/176
Issue 176 - Use pod templates annotations
2021-11-08 16:47:29 +01:00
aenima4six2
c4ce86cb0b 176 - Update helm chart with reloadStrategy support 2021-11-06 18:14:46 -04:00
aenima4six2
dfe7e9b3ca 176 - Add reload strategies to support pod annotation templates 2021-10-17 19:09:50 -04:00
stakater-user
1c29bfc084 [skip-ci] Update artifacts 2021-10-12 07:03:12 +00:00
Faizan Ahmad
c48e2bb8bb Merge pull request #269 from stakater/fix-reloader
Fix update path of reloader
2021-10-12 08:45:48 +02:00
hanzala1234
df40b5d02e Delete reloader.yam 2021-10-11 19:58:59 +05:00
hanzala1234
aa26a2222b Fix update path of reloader 2021-10-11 19:58:13 +05:00
stakater-user
f9d1a967c7 [skip-ci] Update artifacts 2021-10-11 14:43:27 +00:00
Faizan Ahmad
b2e1d3f0dd Merge pull request #268 from stakater/fix-pipeline
Generate manifest in separate file
2021-10-11 16:28:20 +02:00
hanzala
24478a9dd4 Update jumbo manifest in pipeline 2021-10-11 19:12:39 +05:00
hanzala
160525bd1f Fix command 2021-10-11 18:38:13 +05:00
hanzala
d9158ab602 Fix helm template command 2021-10-08 17:36:54 +05:00
hanzala
2b4cc64026 Generate manifest in separate file 2021-10-08 17:34:44 +05:00
stakater-user
ccd7dcb867 [skip-ci] Update artifacts 2021-09-30 08:29:26 +00:00
Faizan Ahmad
5c5c555a7f Merge pull request #264 from jamesgoodhouse/update_helm_notes
Update NOTES for Helm chart to be dynamic
2021-09-30 10:13:30 +02:00
James J. Goodhouse
273e4768f3 update helm chart NOTES to be dynamic 2021-09-28 09:16:49 -07:00
Ahmed Waleed Malik
69e359e9fc Merge pull request #259 from iamNoah1/better-readability
[skip-ci] better readability for example templates
2021-08-30 09:41:23 +05:00
Noah Ispas (iamNoah1)
e5352df348 better readability for example templates 2021-08-28 20:34:30 +02:00
stakater-user
f2b4e8e6c6 [skip-ci] Update artifacts 2021-07-30 06:35:22 +00:00
Ahmed Waleed Malik
99a38bff8e Merge pull request #254 from stakater/handle-resource-redeploy
fix: Reload pods after redeploy secrets or configmap
2021-07-30 11:20:35 +05:00
Ahmed Waleed Malik
d0aa627715 Merge pull request #253 from stakater/remove-unused-files
[skip-ci] Remove unused files
2021-07-30 10:56:58 +05:00
Waleed Malik
953cbe9d28 Reload resource if secret/configmap is re-created 2021-07-30 10:24:50 +05:00
Waleed Malik
f7873aba7b Update dependencies 2021-07-29 15:24:41 +05:00
Waleed Malik
f9728ecfff Add run target in Makefile 2021-07-29 15:24:30 +05:00
Waleed Malik
96a44153de Non-root user should be specified in numeric form in Dockerfile 2021-07-29 15:24:09 +05:00
Waleed Malik
cafbcbd2cb Update build image targetC 2021-07-29 14:56:43 +05:00
Waleed Malik
6397a35e32 Remove unused files 2021-07-29 14:33:04 +05:00
Waleed Malik
aea8592880 Update golangci-lint step in workflows 2021-07-29 14:32:56 +05:00
stakater-user
2aa514a34c [skip-ci] Update artifacts 2021-07-28 10:36:56 +00:00
Faizan Ahmad
ac39bc4eba Merge pull request #251 from aslafy-z/patch-1
docs(helm): podmonitor does not need service
2021-07-28 12:20:07 +02:00
Zadkiel
284d21686e docs(helm): podmonitor does not need service 2021-07-20 17:42:59 +02:00
stakater-user
00c0c11c76 [skip-ci] Update artifacts 2021-07-11 07:51:39 +00:00
Brandon Clifford
96ebfa8e62 Fix typo in Chart.yaml sources (#248) 2021-07-11 09:37:32 +02:00
stakater-user
95d442d80f [skip-ci] Update artifacts 2021-07-09 07:43:58 +00:00
Faizan Ahmad
e4e58882ab Merge pull request #246 from gciria/add-chart-liveness-readiness
Add Liveness and readiness probe timeout values
2021-07-09 09:29:41 +02:00
Gustavo Ciria
ea71fc0eec Create Chart.yaml 2021-07-08 13:07:43 -03:00
Gustavo Ciria
462b225d92 Delete Chart.yaml
Version and appVersion do not need to be updated manually.
2021-07-08 12:55:03 -03:00
Gustavo Ciria
d8728092f8 Add Liveness and readiness probe timeout values 2021-07-08 11:44:49 -03:00
stakater-user
2c8ef70c43 [skip-ci] Update artifacts 2021-06-28 14:19:33 +00:00
Faizan Ahmad
4d2c8a451e Merge pull request #243 from sfynx/master
Only enable Rollouts when enabled in Helm chart.
2021-06-28 16:03:38 +02:00
Henno Schooljan
f7927c85b1 Disable OpenShift by default, add notes in README. 2021-06-25 21:28:16 +02:00
Henno Schooljan
2e2fd2a11b Only enable Rollouts when enabled in Helm chart.
This prevents a permission issue in case Rollouts is available on a cluster, but the user does not have permission to use it (e.g. as a tenant on a cluster without cluster admin rights), and therefore also may not set permissions for it.

See issue #231.
2021-06-16 20:46:51 +02:00
stakater-user
0e6ec1d36b [skip-ci] Update artifacts 2021-06-15 17:40:07 +00:00
Faizan Ahmad
85b33d9104 Merge pull request #242 from stakater/actions-update
Added helm template step in push action
2021-06-15 19:25:54 +02:00
talha0324
c838ecbbc7 Updated command to one line 2021-06-15 19:52:28 +05:00
stakater-user
068a5c1e64 [skip-ci] Update artifacts 2021-06-15 14:13:35 +00:00
Faizan Ahmad
4d559a1864 Merge pull request #240 from stakater/file-name-fix
Fixed file name
2021-06-15 15:57:30 +02:00
talha0324
322142dd66 syntax fix 2021-06-15 18:24:56 +05:00
talha0324
39f37b706c Added helm template step in push action 2021-06-15 18:23:43 +05:00
talha0324
4e10dd4f80 Merge branch 'master' into file-name-fix
merging master into this branch
2021-06-15 17:58:49 +05:00
stakater-user
ccaa600ff4 [skip-ci] Update artifacts 2021-06-15 11:42:02 +00:00
Faizan Ahmad
a3fcfeb62f Merge pull request #241 from stakater/actions-fix
Fixed helm version tag and PR message
2021-06-15 13:28:07 +02:00
talha0324
d2cbbafeb1 Fixed helm version tag and PR message 2021-06-15 16:07:49 +05:00
talha0324
eaf8e16414 Fixed file name 2021-06-15 15:48:52 +05:00
stakater-user
5a65cf9f6d [skip-ci] Update artifacts 2021-06-13 19:48:18 +00:00
Faizan Ahmad
a8a68ae1b0 Merge pull request #236 from tete17/Update-dependencies-for-Argo-Rollouts
Update ArgoCD Rollouts to 1.0.1 to fix a compatibility issue
2021-06-13 21:33:59 +02:00
tete17
7643a27fb1 Upgrade argo-rollouts to v1.0.1 2021-06-04 18:32:02 +02:00
tete17
71fdb53c2e Update ArgoCD Rollouts to 0.10.2 to fix a compatibility issue and update necessary k8s machinery 2021-05-31 15:35:51 +02:00
stakater-user
d6312f6f83 [skip-ci] Update artifacts 2021-05-25 03:26:20 +00:00
Ahmed Waleed Malik
19220f5e6e Merge pull request #235 from phillebaba/feature/chart-priority-class
Add priority class name to helm chart
2021-05-25 08:13:08 +05:00
Philip Laine
05456b0905 Remove priority class name value 2021-05-24 21:19:20 +02:00
Philip Laine
10328dee8d Update deployments/kubernetes/chart/reloader/templates/deployment.yaml
Co-authored-by: Ahmed Waleed Malik <ahmedwaleedmalik@gmail.com>
2021-05-24 21:18:42 +02:00
Philip Laine
fd174ed691 Add priority class name to helm chart 2021-05-19 22:06:35 +02:00
stakater-user
2e47f1740c [skip-ci] Update artifacts 2021-04-26 04:52:02 +00:00
Ahmed Waleed Malik
15cb96f945 Merge pull request #228 from stakater/fix-issue-221
Add Optional pod monitor
2021-04-26 09:38:29 +05:00
faizanahmad055
1e987db54d Add endline in podmonitor.yaml
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:10:23 +02:00
faizanahmad055
12a7fed3ae Add endline in values.yaml.tmpl
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:09:40 +02:00
stakater-user
f18fac66c2 [skip-ci] Update artifacts 2021-04-25 21:05:39 +00:00
faizanahmad055
b5c95f9cbf Add Optional pod monitor
Signed-off-by: faizanahmad055 <faizan.ahmad55@outlook.com>
2021-04-25 23:05:36 +02:00
Faizan Ahmad
46b948388f Merge pull request #226 from tenstad/propagate-error-to-fix-retry
Propagate error to enable retry
2021-04-25 22:52:06 +02:00
Amund Tenstad
78be58b090 Do not log content of secrets 2021-04-21 16:12:53 +02:00
Amund Tenstad
54a8e0683b Propagate PerformRollingUpgrade error to Handle 2021-04-21 13:25:08 +02:00
stakater-user
702f0caa93 [skip-ci] Update artifacts 2021-04-11 18:21:36 +00:00
Faizan Ahmad
2e709e85ae Merge pull request #223 from sfynx/master
fix: read isArgoRollouts correctly in Helm chart
2021-04-11 20:08:10 +02:00
Henno Schooljan
debfd57a91 fix: read isArgoRollouts correctly in Helm chart 2021-04-07 23:21:41 +02:00
41 changed files with 4437 additions and 637 deletions


@@ -7,7 +7,7 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.17.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
@@ -36,10 +36,12 @@ jobs:
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
version: v1.33
only-new-issues: false
args: --timeout 10m
- name: Helm Lint
run: |
@@ -113,7 +115,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
with:
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
allow-repeats: false
- name: Notify Failure


@@ -7,7 +7,7 @@ on:
env:
DOCKER_FILE_PATH: Dockerfile
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.17.2
KUBERNETES_VERSION: "1.18.0"
KIND_VERSION: "0.7.0"
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
@@ -39,11 +39,13 @@ jobs:
run: |
make install
- name: Lint
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
golangci-lint run --timeout=10m ./...
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
version: v1.33
only-new-issues: false
args: --timeout 10m
- name: Install kubectl
run: |
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -127,6 +129,11 @@ jobs:
VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
run: make bump-chart
- name: Helm Template
run: |
helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
# Publish helm chart
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master


@@ -6,7 +6,7 @@ on:
- "v*"
env:
GOLANG_VERSION: 1.15.2
GOLANG_VERSION: 1.17.2
jobs:
build:

.gitignore (vendored): 3 lines changed

@@ -8,4 +8,5 @@ _gopath/
.DS_Store
.vscode
vendor
dist
dist
Reloader


@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=${BUILDPLATFORM} golang:1.15.2 as builder
FROM --platform=${BUILDPLATFORM} golang:1.17.2 as builder
ARG TARGETOS
ARG TARGETARCH
@@ -26,7 +26,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
USER 65532:65532
# Port for metrics and probes
EXPOSE 9090


@@ -1,6 +1,6 @@
# note: call scripts from /scripts
.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
.PHONY: default build build-image test stop push apply deploy release release-all manifest push
OS ?= linux
ARCH ?= ???
@@ -9,12 +9,12 @@ ALL_ARCH ?= arm64 arm amd64
BUILDER ?= reloader-builder-${ARCH}
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
# Default value "dev"
TAG ?= v0.0.75.0
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
# Default value "dev"
VERSION ?= 0.0.1
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
BUILD=
GOCMD = go
@@ -26,23 +26,19 @@ default: build test
install:
"$(GOCMD)" mod download
run:
go run ./main.go
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
builder-image:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
reloader-${ARCH}.tar:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
binary-image: builder-image
cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
build-image:
docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${REPOSITORY_ARCH}" --load -f Dockerfile .
push:
docker push ${REPOSITORY_ARCH}
release: binary-image push manifest
release: build-image push manifest
release-all:
-rm -rf ~/.docker/manifests/*
@@ -66,23 +62,6 @@ test:
stop:
@docker stop "${BINARY}"
clean-images: stop
-docker rmi "${BINARY}"
@for arch in $(ALL_ARCH) ; do \
echo Clean image: $$arch ; \
make clean-image ARCH=$$arch ; \
done
-docker rmi "${REPOSITORY_GENERIC}"
clean-image:
-docker rmi "${BUILDER}"
-docker rmi "${REPOSITORY_ARCH}"
-rm -rf ~/.docker/manifests/*
clean:
"$(GOCMD)" clean -i
-rm -rf reloader-*.tar
apply:
kubectl apply -f deployments/manifests/ -n temp-reloader
@@ -93,3 +72,4 @@ bump-chart:
sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml


@@ -99,7 +99,8 @@ metadata:
annotations:
configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
template: metadata:
template:
metadata:
```
### Secret
@@ -114,7 +115,8 @@ metadata:
annotations:
secret.reloader.stakater.com/reload: "foo-secret"
spec:
template: metadata:
template:
metadata:
```
Use comma separated list to define multiple secrets.
@@ -125,7 +127,8 @@ metadata:
annotations:
secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
template: metadata:
template:
metadata:
```
### NOTES
@@ -142,6 +145,19 @@ spec:
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
## Reload Strategies
Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers
referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).
This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation
on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools
to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its
`configMap` or `secret` be deleted and recreated.
This strategy can be specified with the `--reload-strategy=annotations` argument.
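As a concrete sketch of the difference between the two strategies (illustrative only, not text from this PR): a `Deployment` opted in via the reload annotation stays the same either way; what changes is where Reloader writes the reload marker. Under `annotations`, it patches the pod template annotation built from `ReloaderAnnotationPrefix` and `LastReloadedFromAnnotation` (see the constants diff further down) instead of injecting a `STAKATER_*` environment variable. The annotation value below is a placeholder.
```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  template:
    metadata:
      annotations:
        # Written by Reloader when --reload-strategy=annotations is active.
        # The value is a placeholder; it records the last resource that
        # triggered a reload.
        reloader.stakater.com/last-reloaded-from: "<foo-configmap reference>"
    spec:
      containers:
        - name: foo
          image: example/foo:latest
```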
## Deploying to Kubernetes
@@ -219,7 +235,15 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
| Parameter | Description | Type |
| ---------------- |------------------------------------------------------------------------------| ------- |
| isOpenshift | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean |
| isArgoRollouts | Enable Argo Rollouts. Valid value are either `true` or `false` | boolean |
| reloadOnCreate | Enable reload on create events. Valid value are either `true` or `false` | boolean |
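A minimal values override that switches on all three opt-in behaviours could look like the sketch below (key names taken from the table and from the values.yaml changes in this diff; all three default to `false`):
```
reloader:
  isOpenshift: true      # watch and reload OpenShift DeploymentConfigs
  isArgoRollouts: true   # watch and reload Argo Rollouts
  reloadOnCreate: true   # also reload on create events, not only updates
```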
## Help


@@ -1,26 +0,0 @@
FROM golang:1.15.2-alpine
LABEL maintainer "Stakater Team"
ARG GOARCH=amd64
RUN apk -v --update \
--no-cache \
add git build-base
WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
COPY go.mod go.sum ./
RUN go mod download
COPY . .
ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
COPY build/package/Dockerfile.run /
# Running this image produces a tarball suitable to be piped into another
# Docker build command.
CMD tar -cf - -C / Dockerfile.run Reloader


@@ -1,14 +0,0 @@
FROM alpine:3.11
LABEL maintainer "Stakater Team"
RUN apk add --update --no-cache ca-certificates
COPY Reloader /bin/Reloader
# On alpine 'nobody' has uid 65534
USER 65534
# Port for metrics and probes
EXPOSE 9090
ENTRYPOINT ["/bin/Reloader"]


@@ -3,14 +3,14 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.86
appVersion: v0.0.86
version: v0.0.106
appVersion: v0.0.106
keywords:
- Reloader
- kubernetes
home: https://github.com/stakater/Reloader
sources:
- https://github.com/stakater/IngressMonitorController
- https://github.com/stakater/Reloader
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater


@@ -1,7 +1,7 @@
- For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
configmap.reloader.stakater.com/reload: "foo-configmap"
{{ .Values.reloader.custom_annotations.configmap | default "configmap.reloader.stakater.com/reload" }}: "foo-configmap"
- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
secret.reloader.stakater.com/reload: "foo-secret"
- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
{{ .Values.reloader.custom_annotations.secret | default "secret.reloader.stakater.com/reload" }}: "foo-secret"
- After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
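Because of the `default` filters above, the NOTES output follows whatever custom annotation keys are configured. A sketch of such an override, reusing the placeholder keys already suggested in the chart's values.yaml:
```
reloader:
  custom_annotations:
    # rendered in place of configmap.reloader.stakater.com/reload
    configmap: "my.company.com/configmap"
    # rendered in place of secret.reloader.stakater.com/reload
    secret: "my.company.com/secret"
```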


@@ -32,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -44,7 +44,7 @@ rules:
- update
- patch
{{- end }}
{{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.isArgoRollouts) }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""


@@ -15,8 +15,9 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
replicas: {{ .Values.reloader.deployment.replicas }}
revisionHistoryLimit: 2
selector:
matchLabels:
@@ -51,6 +52,9 @@ spec:
{{- if .Values.reloader.deployment.tolerations }}
tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
containers:
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
@@ -97,17 +101,25 @@ spec:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
volumeMounts:
- mountPath: /tmp/
name: tmp-volume
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default")}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -147,6 +159,12 @@ spec:
{{- if eq .Values.reloader.isArgoRollouts true }}
- "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
{{- end }}
{{- if eq .Values.reloader.reloadOnCreate true }}
- "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
{{- end }}
{{- if ne .Values.reloader.reloadStrategy "default" }}
- "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
resources:
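Putting the arg-building block above together: with, for example, `reloadOnCreate: true` and `reloadStrategy: annotations` set in values, the rendered container would gain roughly the following flags (a sketch of expected `helm template` output, not captured from this PR):
```
args:
  - "--reload-on-create=true"        # emitted because reloadOnCreate is true
  - "--reload-strategy=annotations"  # emitted because reloadStrategy is not "default"
```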


@@ -0,0 +1,31 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
namespace: {{ .Values.reloader.podMonitor.namespace }}
{{- end }}
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}


@@ -32,7 +32,7 @@ rules:
- list
- get
- watch
{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
- apiGroups:
- "apps.openshift.io"
- ""
@@ -44,7 +44,7 @@ rules:
- update
- patch
{{- end }}
{{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.isArgoRollouts) }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
- apiGroups:
- "argoproj.io"
- ""


@@ -19,4 +19,5 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -0,0 +1,19 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"reloader": {
"type": "object",
"properties": {
"reloadStrategy": {
"type": "string",
"enum": [
"default",
"env-vars",
"annotations"
]
}
}
}
}
}
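With this schema in place, Helm validates the supplied values at install/upgrade time (standard Helm 3 behaviour for values.schema.json), so a misspelled strategy fails fast instead of reaching the cluster:
```
reloader:
  reloadStrategy: annotations   # accepted: one of default, env-vars, annotations
  # reloadStrategy: rolling     # rejected by the schema before any manifest is rendered
```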


@@ -13,6 +13,8 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
@@ -22,6 +24,7 @@ reloader:
rbac: false
matchLabels: {}
deployment:
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
@@ -52,10 +55,10 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.77
version: v0.0.106
image:
name: stakater/reloader
tag: v0.0.86
tag: v0.0.106
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -66,6 +69,18 @@ reloader:
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# Liveness and readiness probe timeout values.
livenessProbe: {}
# timeoutSeconds: 5
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
readinessProbe: {}
# timeoutSeconds: 15
# failureThreshold: 5
# periodSeconds: 10
# successThreshold: 1
# Specify resource requests/limits for the deployment.
# Example:
# resources:
@@ -78,6 +93,7 @@ reloader:
resources: {}
pod:
annotations: {}
priorityClassName: ""
service: {}
# labels: {}
@@ -95,15 +111,17 @@ reloader:
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
# configmap: "my.company.com/configmap"
# secret: "my.company.com/secret"
custom_annotations: {}
serviceMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
# Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
# Enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
@@ -113,4 +131,14 @@ reloader:
# labels:
# Set timeout for scrape
# timeout: 10s
podMonitor:
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
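As a worked example of the new knobs, a values override tuning the probe timings and enabling the PodMonitor might look like this sketch (keys mirror the commented defaults above; the namespace is an assumption about where Prometheus Operator runs):
```
reloader:
  deployment:
    livenessProbe:
      timeoutSeconds: 5
      failureThreshold: 5
      periodSeconds: 10
      successThreshold: 1
    readinessProbe:
      timeoutSeconds: 15
  podMonitor:
    enabled: true
    namespace: monitoring   # assumption: Prometheus Operator's namespace
    interval: 30s
```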


@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,10 +9,10 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
@@ -46,4 +46,3 @@ rules:
- get
- update
- patch


@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -9,10 +9,10 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
@@ -23,4 +23,3 @@ subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default


@@ -8,15 +8,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v0.0.106
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -28,17 +28,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v0.0.106
spec:
containers:
- image: "stakater/reloader:v0.0.77"
- image: "stakater/reloader:v0.0.106"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -49,13 +48,19 @@ spec:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader


@@ -0,0 +1,3 @@
---
# Source: reloader/templates/podmonitor.yaml


@@ -1,6 +1,5 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -9,9 +8,9 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default


@@ -1,7 +1,23 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,10 +25,10 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
@@ -46,11 +62,10 @@ rules:
- get
- update
- patch
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -58,10 +73,10 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
@@ -72,7 +87,6 @@ subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
@@ -83,15 +97,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v0.0.106
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -103,17 +117,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v0.0.106"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v0.0.106
spec:
containers:
- image: "stakater/reloader:v0.0.77"
- image: "stakater/reloader:v0.0.106"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -124,46 +137,19 @@ spec:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: reloader-reloader
---
# Source: reloader/templates/role.yaml
---
# Source: reloader/templates/rolebinding.yaml
---
# Source: reloader/templates/service.yaml
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader
---
# Source: reloader/templates/servicemonitor.yaml


@@ -13,6 +13,8 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
watchGlobally: true
@@ -22,6 +24,7 @@ reloader:
rbac: false
matchLabels: {}
deployment:
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
@@ -114,3 +117,15 @@ reloader:
# Set timeout for scrape
# timeout: 10s
podMonitor:
# enabling this requires service to be enabled as well, or no endpoints will be found
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

go.mod: 107 lines changed

@@ -1,26 +1,97 @@
module github.com/stakater/Reloader
go 1.15
go 1.17
require (
github.com/argoproj/argo-rollouts v0.7.2
github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
github.com/onsi/ginkgo v1.15.1 // indirect
github.com/onsi/gomega v1.11.0 // indirect
github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
github.com/prometheus/client_golang v1.9.0
github.com/sirupsen/logrus v1.6.0
github.com/argoproj/argo-rollouts v1.0.2
github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
github.com/prometheus/client_golang v1.10.0
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.3
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
k8s.io/api v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/client-go v0.21.2
)
replace (
github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.9.0+incompatible // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-openapi/jsonpointer v0.19.3 // indirect
github.com/go-openapi/jsonreference v0.19.3 // indirect
github.com/go-openapi/spec v0.19.3 // indirect
github.com/go-openapi/swag v0.19.5 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.4.3 // indirect
github.com/google/go-cmp v0.5.4 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.10 // indirect
github.com/mailru/easyjson v0.7.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/onsi/ginkgo v1.15.1 // indirect
github.com/onsi/gomega v1.11.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.21.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 // indirect
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 // indirect
golang.org/x/text v0.3.4 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/protobuf v1.25.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.8.0 // indirect
k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 // indirect
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)
// Replacements for argo-rollouts
replace (
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
k8s.io/api => k8s.io/api v0.20.4
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
k8s.io/apiserver => k8s.io/apiserver v0.20.4
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
k8s.io/client-go => k8s.io/client-go v0.20.4
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
k8s.io/component-base => k8s.io/component-base v0.20.4
k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
k8s.io/kubectl => k8s.io/kubectl v0.20.4
k8s.io/kubelet => k8s.io/kubelet v0.20.4
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
k8s.io/metrics => k8s.io/metrics v0.20.4
k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.4
k8s.io/sample-controller => k8s.io/sample-controller v0.20.4
)

go.sum: 1114 lines changed

File diff suppressed because it is too large.


@@ -1,6 +1,7 @@
package callbacks
import (
"context"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
@@ -47,46 +48,87 @@ type RollingUpgradeFuncs struct {
// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deployments %v", err)
}
// Ensure we always have pod annotations to add to
for i, v := range deployments.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
annotations := make(map[string]string)
deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
}
}
return util.InterfaceSlice(deployments.Items)
}
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list daemonSets %v", err)
}
// Ensure we always have pod annotations to add to
for i, v := range daemonSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
}
return util.InterfaceSlice(daemonSets.Items)
}
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list statefulSets %v", err)
}
// Ensure we always have pod annotations to add to
for i, v := range statefulSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
}
return util.InterfaceSlice(statefulSets.Items)
}
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
}
// Ensure we always have pod annotations to add to
for i, v := range deploymentConfigs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
}
return util.InterfaceSlice(deploymentConfigs.Items)
}
// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(meta_v1.ListOptions{})
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list Rollouts %v", err)
}
// Ensure we always have pod annotations to add to
for i, v := range rollouts.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
}
return util.InterfaceSlice(rollouts.Items)
}
@@ -193,38 +235,38 @@ func GetRolloutInitContainers(item interface{}) []v1.Container {
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
deployment := resource.(appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
daemonSet := resource.(appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
statefulSet := resource.(appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
deploymentConfig := resource.(openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
rollout := resource.(argorolloutv1alpha1.Rollout)
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(rollout.Name, meta_v1.GetOptions{})
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(&rollout)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}


@@ -3,7 +3,9 @@ package cmd
import (
"errors"
"fmt"
"github.com/stakater/Reloader/internal/pkg/constants"
"os"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -18,9 +20,10 @@ import (
// NewReloaderCommand starts the reloader controller
func NewReloaderCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "reloader",
Short: "A watcher for your Kubernetes cluster",
Run: startReloader,
Use: "reloader",
Short: "A watcher for your Kubernetes cluster",
PreRunE: validateFlags,
Run: startReloader,
}
// options
@@ -33,9 +36,25 @@ func NewReloaderCommand() *cobra.Command {
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
return cmd
}
func validateFlags(*cobra.Command, []string) error {
// Ensure the reload strategy is one of the following...
valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
for _, s := range valid {
if s == options.ReloadStrategy {
return nil
}
}
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
return errors.New(err)
}
func configureLogging(logFormat string) error {
switch logFormat {
case "json":


@@ -7,4 +7,16 @@ const (
SecretEnvVarPostfix = "SECRET"
// EnvVarPrefix is a Prefix for environment variable
EnvVarPrefix = "STAKATER_"
// ReloaderAnnotationPrefix is a Prefix for all reloader annotations
ReloaderAnnotationPrefix = "reloader.stakater.com"
// LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload
LastReloadedFromAnnotation = "last-reloaded-from"
// ReloadStrategyFlag The reload strategy flag name
ReloadStrategyFlag = "reload-strategy"
// EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart
EnvVarsReloadStrategy = "env-vars"
// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
AnnotationsReloadStrategy = "annotations"
)


@@ -2,6 +2,7 @@ package controller
import (
"fmt"
"github.com/stakater/Reloader/internal/pkg/options"
"time"
"github.com/sirupsen/logrus"
@@ -30,6 +31,9 @@ type Controller struct {
collectors metrics.Collectors
}
// controllerInitialized flag determines whether controlled is being initialized
var controllerInitialized bool = false
// NewController for initializing a Controller
func NewController(
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
@@ -57,8 +61,14 @@ func NewController(
// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
// This causes the issue where reloader reloads the pods when reloader itself gets restarted as it's queue is filled with all the k8s objects as new resources.
if options.ReloadOnCreate == "true" {
if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
c.queue.Add(handler.ResourceCreatedHandler{
Resource: obj,
Collectors: c.collectors,
})
}
}
}
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -111,6 +121,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
}
func (c *Controller) runWorker() {
// At this point the controller is fully initialized and we can start processing the resources
controllerInitialized = true
for c.processNextItem() {
}
}
@@ -145,7 +158,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
// This controller retries 5 times if something goes wrong. After that, it stops trying.
if c.queue.NumRequeues(key) < 5 {
logrus.Errorf("Error syncing events %v: %v", key, err)
logrus.Errorf("Error syncing events: %v", err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.

File diff suppressed because it is too large.


@@ -20,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
} else {
config, _ := r.GetConfig()
// process resource based on its type
doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors)
}
return nil
}


@@ -22,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
// process resource based on its type
doRollingUpgrade(config, r.Collectors)
return doRollingUpgrade(config, r.Collectors)
}
}
return nil


@@ -1,9 +1,9 @@
package handler
import (
"strconv"
"strings"
"encoding/json"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
@@ -13,6 +13,8 @@ import (
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
"strconv"
"strings"
)
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
@@ -85,34 +87,52 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
clients := kube.GetClients()
rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
if kube.IsOpenshift {
rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
if err != nil {
return err
}
}
return nil
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
}
return err
}
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
var err error
for _, i := range items {
// find correct annotation and update the resource
annotations := upgradeFuncs.AnnotationsFunc(i)
@@ -128,7 +148,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
result := constants.NotUpdated
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
if err == nil && reloaderEnabled {
-result = updateContainers(upgradeFuncs, i, config, true)
+result = invokeReloadStrategy(upgradeFuncs, i, config, true)
}
if result != constants.Updated && annotationValue != "" {
@@ -136,7 +156,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
-result = updateContainers(upgradeFuncs, i, config, false)
+result = invokeReloadStrategy(upgradeFuncs, i, config, false)
if result == constants.Updated {
break
}
@@ -147,7 +167,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
-result = updateContainers(upgradeFuncs, i, config, true)
+result = invokeReloadStrategy(upgradeFuncs, i, config, true)
}
}
@@ -157,6 +177,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
if err != nil {
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+return err
} else {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
@@ -164,7 +185,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
}
}
}
-return err
+return nil
}
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
@@ -238,7 +259,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
return nil
}
-func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
+func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
volumes := upgradeFuncs.VolumesFunc(item)
containers := upgradeFuncs.ContainersFunc(item)
initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -277,10 +298,70 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
return container
}
-func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
+return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
+}
+return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
+}
+func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
+if container == nil {
+return constants.NoContainerFound
+}
+// Generate reloaded annotations. Attaching these to the item's pod template will trigger a rollout
+// Note: the data on this struct is purely informational and is not used for future updates
+reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
+annotations, err := createReloadedAnnotations(&reloadSource)
+if err != nil {
+logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
+return constants.NotUpdated
+}
+// Copy all the annotations to the item's pod annotations
+pa := upgradeFuncs.PodAnnotationsFunc(item)
+if pa == nil {
+return constants.NotUpdated
+}
+for k, v := range annotations {
+pa[k] = v
+}
+return constants.Updated
+}
+func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
+if target == nil {
+return nil, errors.New("target is required")
+}
+// Create a single "last-reloaded-from" annotation that stores metadata about the
+// resource that caused the last reload.
+// Intentionally only storing the last item in order to keep
+// the generated annotations as small as possible.
+annotations := make(map[string]string)
+lastReloadedResourceName := fmt.Sprintf("%s/%s",
+constants.ReloaderAnnotationPrefix,
+constants.LastReloadedFromAnnotation,
+)
+lastReloadedResource, err := json.Marshal(target)
+if err != nil {
+return nil, err
+}
+annotations[lastReloadedResourceName] = string(lastReloadedResource)
+return annotations, nil
+}
+func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
var result constants.Result
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
-container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
+container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return constants.NoContainerFound
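invokeReloadStrategy is the new dispatch point between the two strategies: updatePodAnnotations stamps a single informational annotation onto the pod template, while updateContainerEnvVars (the default, per constants.EnvVarsReloadStrategy) injects an environment variable whose value is the new SHA. Either way the pod template changes, which is what actually triggers the rollout. A sketch of the env-var name construction, assuming constants.EnvVarPrefix is "STAKATER_" and that util.ConvertToEnvVarName upper-cases the name and maps '-' and '.' to '_':

    package sketch

    import "strings"

    // envVarNameFor mimics the name built in updateContainerEnvVars:
    // prefix + converted resource name + "_" + resource type.
    func envVarNameFor(resourceName, resourceType string) string {
        mapped := strings.NewReplacer("-", "_", ".", "_").Replace(resourceName)
        return "STAKATER_" + strings.ToUpper(mapped) + "_" + resourceType
    }

    // envVarNameFor("my-app-config", "CONFIGMAP") yields
    // "STAKATER_MY_APP_CONFIG_CONFIGMAP"; its value is set to config.SHAValue,
    // so a changed hash edits the pod template and rolls the pods.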

File diff suppressed because it is too large.


@@ -1,5 +1,7 @@
package options
import "github.com/stakater/Reloader/internal/pkg/constants"
var (
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
// configmaps specified by name
@@ -17,6 +19,10 @@ var (
SearchMatchAnnotation = "reloader.stakater.com/match"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
-// Adds support for argo rollouts
+// IsArgoRollouts Adds support for argo rollouts
IsArgoRollouts = "false"
+// ReloadStrategy Specify the update strategy
+ReloadStrategy = constants.EnvVarsReloadStrategy
+// ReloadOnCreate Adds support to watch create events
+ReloadOnCreate = "false"
)
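These variables are bound to command-line flags in Reloader's root command; that wiring is not part of this hunk, so the sketch below uses the standard flag package, and the flag names and the "env-vars"/"annotations" values are assumptions inferred from the constant names:

    package sketch

    import (
        "flag"
        "fmt"
    )

    var (
        reloadStrategy = flag.String("reload-strategy", "env-vars",
            "how to trigger a rollout: env-vars (default) or annotations")
        reloadOnCreate = flag.String("reload-on-create", "false",
            "also react to create events, not only updates")
    )

    // validateFlags rejects strategies the dispatch in invokeReloadStrategy
    // would not understand.
    func validateFlags() error {
        if *reloadStrategy != "env-vars" && *reloadStrategy != "annotations" {
            return fmt.Errorf("unsupported reload strategy %q", *reloadStrategy)
        }
        return nil
    }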


@@ -1,6 +1,9 @@
package testutil
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"sort"
"strconv"
@@ -33,7 +36,7 @@ var (
// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
if err != nil {
logrus.Fatalf("Failed to create namespace for testing %v", err)
} else {
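The mechanical churn through the rest of this file tracks client-go v0.18, which added a context.Context parameter and an explicit options struct to every typed client method. The pattern in isolation:

    package sketch

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // Before v0.18: client.CoreV1().Namespaces().Create(ns)
    // From v0.18 on, every call site threads a context and an options struct:
    func createNamespace(client kubernetes.Interface, name string) error {
        ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
        _, err := client.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
        return err
    }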
@@ -43,7 +46,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
-err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
+err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
if err != nil {
logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
} else {
@@ -562,8 +565,8 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}
}
-// GetResourceSHA returns the SHA value of given environment variable
-func GetResourceSHA(containers []v1.Container, envVar string) string {
+// GetResourceSHAFromEnvVar returns the SHA value of given environment variable
+func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
for i := range containers {
envs := containers[i].Env
for j := range envs {
@@ -575,6 +578,28 @@ func GetResourceSHA(containers []v1.Container, envVar string) string {
return ""
}
+// GetResourceSHAFromAnnotation returns the SHA value stored in the last-reloaded-from pod annotation
+func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
+lastReloadedResourceName := fmt.Sprintf("%s/%s",
+constants.ReloaderAnnotationPrefix,
+constants.LastReloadedFromAnnotation,
+)
+annotationJson, ok := podAnnotations[lastReloadedResourceName]
+if !ok {
+return ""
+}
+var last util.ReloadSource
+bytes := []byte(annotationJson)
+err := json.Unmarshal(bytes, &last)
+if err != nil {
+return ""
+}
+return last.Hash
+}
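A sketch of how a test might use the new helper to assert that the annotations strategy fired (assumed usage, written as if inside this repository; the actual test changes in this compare differ):

    package sketch

    import (
        "testing"

        "github.com/stakater/Reloader/internal/pkg/testutil"
    )

    // assertAnnotationReload checks that the pod template carries the hash of
    // the resource that was just updated.
    func assertAnnotationReload(t *testing.T, podAnnotations map[string]string, wantSHA string) {
        t.Helper()
        if got := testutil.GetResourceSHAFromAnnotation(podAnnotations); got != wantSHA {
            t.Errorf("last-reloaded-from hash = %q, want %q", got, wantSHA)
        }
    }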
//ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
@@ -597,7 +622,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
logrus.Infof("Creating configmap")
configmapClient := client.CoreV1().ConfigMaps(namespace)
-_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
+_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return configmapClient, err
}
@@ -606,7 +631,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
-_, err := secretClient.Create(GetSecret(namespace, secretName, data))
+_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return secretClient, err
}
@@ -621,7 +646,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
-deployment, err := deploymentClient.Create(deploymentObj)
+deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -636,7 +661,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
-deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
+deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
time.Sleep(5 * time.Second)
return deploymentConfig, err
}
@@ -651,7 +676,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
-deployment, err := deploymentClient.Create(deploymentObj)
+deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -661,7 +686,7 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
-deployment, err := deploymentClient.Create(deploymentObj)
+deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
@@ -672,7 +697,7 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
-deployment, err := deploymentClient.Create(deploymentObj)
+deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -684,7 +709,7 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
-deployment, err := deploymentClient.Create(deploymentObj)
+deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
@@ -699,7 +724,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
} else {
daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
}
-daemonset, err := daemonsetClient.Create(daemonsetObj)
+daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return daemonset, err
}
@@ -714,7 +739,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
} else {
statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
}
-statefulset, err := statefulsetClient.Create(statefulsetObj)
+statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return statefulset, err
}
@@ -722,7 +747,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment creates a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
-deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
+deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentError
}
@@ -730,7 +755,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
logrus.Infof("Deleting DeploymentConfig")
-deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
+deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentConfigError
}
@@ -738,7 +763,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
-daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
+daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return daemonsetError
}
@@ -746,7 +771,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet creates a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
logrus.Infof("Deleting StatefulSet %s", statefulsetName)
-statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
+statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return statefulsetError
}
@@ -760,7 +785,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
} else {
configmap = GetConfigmap(namespace, configmapName, data)
}
-_, updateErr := configmapClient.Update(configmap)
+_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -774,7 +799,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
} else {
secret = GetSecret(namespace, secretName, data)
}
-_, updateErr := secretClient.Update(secret)
+_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}
@@ -782,7 +807,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
-err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
+err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -790,7 +815,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
logrus.Infof("Deleting secret %q.\n", secretName)
-err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
+err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}
@@ -805,8 +830,8 @@ func RandSeq(n int) string {
return string(b)
}
-// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
@@ -835,7 +860,45 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
if matches {
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
-updated := GetResourceSHA(containers, envName)
+updated := GetResourceSHAFromEnvVar(containers, envName)
if updated == config.SHAValue {
return true
}
}
}
return false
}
+// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
+for _, i := range items {
+podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
+// match resources that carry the relevant annotations
+annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
+searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
+reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
+reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
+matches := false
+if err == nil && reloaderEnabled {
+matches = true
+} else if annotationValue != "" {
+values := strings.Split(annotationValue, ",")
+for _, value := range values {
+value = strings.Trim(value, " ")
+if value == config.ResourceName {
+matches = true
+break
+}
+}
+} else if searchAnnotationValue == "true" {
+if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
+matches = true
+}
+}
+if matches {
+updated := GetResourceSHAFromAnnotation(podAnnotations)
+if updated == config.SHAValue {
+return true
+}
+}

@@ -0,0 +1,39 @@
+package util
+import "time"
+type ReloadSource struct {
+Type string `json:"type"`
+Name string `json:"name"`
+Namespace string `json:"namespace"`
+Hash string `json:"hash"`
+ContainerRefs []string `json:"containerRefs"`
+ObservedAt int64 `json:"observedAt"`
+}
+func NewReloadSource(
+resourceName string,
+resourceNamespace string,
+resourceType string,
+resourceHash string,
+containerRefs []string,
+) ReloadSource {
+return ReloadSource{
+ObservedAt: time.Now().Unix(),
+Name: resourceName,
+Namespace: resourceNamespace,
+Type: resourceType,
+Hash: resourceHash,
+ContainerRefs: containerRefs,
+}
+}
+func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource {
+return NewReloadSource(
+config.ResourceName,
+config.Namespace,
+config.Type,
+config.SHAValue,
+containerRefs,
+)
+}


@@ -1,6 +1,7 @@
package kube
import (
"context"
"os"
"k8s.io/client-go/tools/clientcmd"
@@ -67,7 +68,7 @@ func isOpenshift() bool {
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
-_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
+_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true
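The probe itself is unchanged: Reloader detects OpenShift by requesting an API group that only OpenShift serves, with the context now threaded through the raw REST call. The same check as a standalone sketch:

    package sketch

    import (
        "context"

        "k8s.io/client-go/kubernetes"
    )

    // isOpenshift reports whether the cluster serves the OpenShift-only
    // project.openshift.io API group.
    func isOpenshift(client kubernetes.Interface) bool {
        _, err := client.Discovery().RESTClient().Get().
            AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
        return err == nil
    }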


@@ -1,4 +0,0 @@
-issues:
-kind: 1
-url: https://aurorasolutions.atlassian.net
-project: STK