Compare commits


76 Commits

Author SHA1 Message Date
Muhammad Safwan Karim
5907d61031 Merge pull request #1062 from stakater/update-version-ekaxhap
Bump version to 1.4.11 on release-v1.4.11 branch
2025-12-10 16:59:21 +05:00
msafwankarim
85dce31b40 Bump version to 1.4.11
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-12-10 11:58:05 +00:00
Muhammad Safwan Karim
b71fb19882 Merge pull request #1061 from MatthiasWerning/bugfix/controller-init
Fix configmaps key in ResourceMap leading to controllers not being able to mark themselves as initialized
2025-12-10 15:35:31 +05:00
Matthias Werning
27fb47ff52 Fix configmaps key in ResourceMap leading to controllers not being able to mark themselves as initialized 2025-12-10 09:10:52 +01:00
Muhammad Safwan Karim
d409b79a11 Merge pull request #1049 from Wamgleb/fix-pause-annotation-bug
fix:Rename pause-deployment-annotation to pause-deployment-time-annot…
2025-11-14 21:50:22 +05:00
Muhammad Safwan Karim
c3546066fa Merge pull request #1052 from artemptushkin/patch-1
Fix issue in yaml path
2025-11-14 21:48:00 +05:00
Muhammad Safwan Karim
7080ec27cc Merge pull request #1032 from arizon-dread/fix-openshift-runAsUser-documentation
fix the value reference to runAsUser for OpenShift
2025-11-13 18:13:54 +05:00
Artem Ptushkin
6f1ecffb25 Fix issue in yaml path
It requires `deployment` actually, see https://artifacthub.io/packages/helm/stakater/reloader?modal=values
2025-11-12 16:04:24 +01:00
Hlib Kalinchuk
765053f21e fix:Rename pause-deployment-annotation to pause-deployment-time-annotation in deployment.yaml 2025-11-07 14:36:59 +02:00
Muhammad Safwan Karim
fd9b7e2c1f Merge pull request #1046 from stakater/update-chart-2.2.5
Bump chart
2025-11-05 17:19:58 +05:00
Muhammad Safwan Karim
bfb720e9e9 Bump chart 2025-11-05 16:13:37 +05:00
Muhammad Safwan Karim
9607da6d8a Merge pull request #1042 from stakater/upgrade-to-go-1.25
Upgradde to go 1.25.3 (latest)
2025-11-05 14:54:46 +05:00
Safwan
32046ebfe0 Fixed spelling 2025-11-05 14:04:49 +05:00
Safwan
5d4b9f5a32 Resolved comments 2025-11-05 14:02:06 +05:00
Felix Tonnvik
620959a03b updated qa link-check to retry on status 429 2025-11-05 09:54:04 +01:00
Safwan
008c45e9ac fixed markdown lint indentation 2025-11-05 13:21:45 +05:00
Safwan
fa201d9762 Clarification about workflow in the readme 2025-11-05 13:09:04 +05:00
Safwan
174b57cdad Fixed linting 2025-11-04 17:39:24 +05:00
Safwan
4476fad274 Update golangci-lint version in workflow 2025-11-04 17:04:40 +05:00
Safwan
16b26be5c2 Upgradde to go 1.25.3 (latest) 2025-11-04 17:01:38 +05:00
Muhammad Safwan Karim
7c429714ae Merge pull request #1023 from stakater/renovate/github.com-argoproj-argo-rollouts-1.x
fix(deps): update module github.com/argoproj/argo-rollouts to v1.8.3
2025-11-04 14:49:38 +05:00
Muhammad Safwan Karim
64c3d8487b Merge pull request #1025 from stakater/renovate/python-3.x
chore(deps): update python docker tag to v3.14
2025-11-04 14:49:04 +05:00
Muhammad Safwan Karim
405069e691 Merge pull request #1039 from stakater/helm/release-2.2.4
Bump chart
2025-11-04 14:47:22 +05:00
Muhammad Safwan Karim
4694b7570e Bump chart 2025-11-04 14:40:54 +05:00
Muhammad Safwan Karim
3a9ca713bb Merge pull request #1037 from stakater/renovate/ghcr.io-stakater-reloader-1.x
chore(deps): update ghcr.io/stakater/reloader docker tag to v1.4.9
2025-11-04 14:37:12 +05:00
renovate[bot]
3de9c688f2 chore(deps): update ghcr.io/stakater/reloader docker tag to v1.4.9 2025-11-04 09:31:37 +00:00
Muhammad Safwan Karim
90b9713b7f Merge pull request #1027 from stakater/renovate/github.com-spf13-cobra-1.x
fix(deps): update module github.com/spf13/cobra to v1.10.1
2025-11-04 14:30:46 +05:00
Muhammad Safwan Karim
9139f838cf Merge pull request #1029 from stakater/renovate/actions-checkout-5.x
chore(deps): update actions/checkout action to v5
2025-11-04 14:28:30 +05:00
Muhammad Safwan Karim
59738b2d6d Merge pull request #1031 from stakater/renovate/sigstore-cosign-installer-4.x
chore(deps): update sigstore/cosign-installer action to v4
2025-11-04 14:26:34 +05:00
Muhammad Safwan Karim
91bdb47dad Merge pull request #1030 from stakater/renovate/actions-setup-go-6.x
chore(deps): update actions/setup-go action to v6
2025-11-04 14:26:07 +05:00
Felix Tonnvik
2835e5952f Merge pull request #1035 from stakater/release-v1.4.9
Release v1.4.9
2025-11-04 09:43:21 +01:00
Felix Tonnvik
cadf4489e8 Merge branch 'master' into release-v1.4.9 2025-11-03 18:05:44 +01:00
Felix Tonnvik
32f83fabc9 Merge pull request #1034 from stakater/update-version-g213t1o
Bump version to 1.4.9 on release-v1.4.9 branch
2025-11-03 18:00:49 +01:00
Felix-Stakater
09f2a63b00 Bump version to 1.4.9
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-11-03 16:57:23 +00:00
Felix Tonnvik
c860dcc402 Merge pull request #1033 from SebastienSyd/master
fix: bump go version to 1.24.9 to fix CVEs
2025-11-03 17:49:45 +01:00
Sebastien NICOT
5f2cf19213 fix: bump go version to fix CVEs 2025-11-03 17:08:16 +01:00
Erik Svensson
8980e1fd80 fix the value reference so it matches the chart value in the documentation 2025-11-03 08:57:13 +01:00
renovate[bot]
644e5d51d3 chore(deps): update sigstore/cosign-installer action to v4 2025-10-31 11:42:26 +00:00
renovate[bot]
65dc259b7b chore(deps): update actions/setup-go action to v6 2025-10-31 11:42:19 +00:00
renovate[bot]
3cf845b596 chore(deps): update actions/checkout action to v5 2025-10-31 11:42:13 +00:00
renovate[bot]
9af46a363c fix(deps): update module github.com/spf13/cobra to v1.10.1 2025-10-31 11:42:02 +00:00
renovate[bot]
999141df8c chore(deps): update python docker tag to v3.14 2025-10-31 11:41:39 +00:00
renovate[bot]
e99bb34451 fix(deps): update module github.com/argoproj/argo-rollouts to v1.8.3 2025-10-31 11:41:27 +00:00
Muhammad Safwan Karim
196373a688 Merge pull request #907 from stakater/renovate/nginxinc-nginx-unprivileged-1.x
chore(deps): update nginxinc/nginx-unprivileged docker tag to v1.29
2025-10-31 16:21:59 +05:00
Muhammad Safwan Karim
c3022c1255 Merge pull request #873 from stakater/renovate/peter-evans-create-pull-request-7.x
chore(deps): update peter-evans/create-pull-request action to v7.0.8
2025-10-31 16:20:38 +05:00
Muhammad Safwan Karim
c988b77933 Merge pull request #1022 from stakater/renovate/stakater-.github-0.x
chore(deps): update stakater/.github action to v0.0.163
2025-10-31 16:18:23 +05:00
renovate[bot]
e3e7cef752 chore(deps): update nginxinc/nginx-unprivileged docker tag to v1.29 2025-10-31 11:17:27 +00:00
Muhammad Safwan Karim
f7d4fca874 Merge pull request #1021 from stakater/renovate/stakater-vale-package-0.x
chore(deps): update dependency stakater/vale-package to v0.0.87
2025-10-31 16:14:56 +05:00
Muhammad Safwan Karim
956b3934da Merge pull request #914 from stakater/renovate/k8s.io-utils-digest
fix(deps): update k8s.io/utils digest to bc988d5
2025-10-31 16:14:13 +05:00
renovate[bot]
39352e4f4d chore(deps): update stakater/.github action to v0.0.163 2025-10-31 11:10:32 +00:00
Muhammad Safwan Karim
a43dcc7b85 Merge pull request #920 from stakater/renovate/anothrnick-github-tag-action-1.x
chore(deps): update anothrnick/github-tag-action action to v1.75.0
2025-10-31 16:09:51 +05:00
renovate[bot]
0078e3f814 chore(deps): update dependency stakater/vale-package to v0.0.87 2025-10-31 11:08:47 +00:00
Muhammad Safwan Karim
acaa00e256 Merge pull request #954 from stakater/renovate/sigstore-cosign-installer-3.x
chore(deps): update sigstore/cosign-installer action to v3.10.1
2025-10-31 16:08:13 +05:00
renovate[bot]
dffed992d6 chore(deps): update sigstore/cosign-installer action to v3.10.1 2025-10-31 07:46:33 +00:00
renovate[bot]
eff894e919 chore(deps): update anothrnick/github-tag-action action to v1.75.0 2025-10-31 07:46:23 +00:00
renovate[bot]
6d640e2ca1 chore(deps): update peter-evans/create-pull-request action to v7.0.8 2025-10-31 07:46:18 +00:00
renovate[bot]
11c99a7c13 fix(deps): update k8s.io/utils digest to bc988d5 2025-10-31 07:46:14 +00:00
Muhammad Safwan Karim
03c3f5947b Merge pull request #1017 from stakater/renovate/stakater-vale-package-0.x
chore(deps): update dependency stakater/vale-package to v0.0.86
2025-10-31 12:36:04 +05:00
Muhammad Safwan Karim
1084574bd0 Merge pull request #1013 from praddy26/fix-bug-996
fix: Prevent permission errors for ignored workload types #996
2025-10-27 11:20:38 +05:00
renovate[bot]
3103e5ac4d chore(deps): update dependency stakater/vale-package to v0.0.86 2025-10-24 10:02:33 +00:00
Pradeep Lakshmi Narasimha
a77c10a2c6 fix: Prevent permission errors for ignored workload types #996
Signed-off-by: Pradeep Lakshmi Narasimha <pradeep.vaishnav4@gmail.com>
2025-09-30 22:43:56 +05:30
Safwan
bd767a7ef1 Bump chart 2025-09-15 14:00:18 +02:00
Sebastien NICOT
3a1cc8f348 fix: bump go version to fix CVEs (#1007) 2025-09-15 14:18:31 +05:00
Pradeep Lakshmi Narasimha
dd0807e951 fix: Controller not respecting ignore* flags 2025-08-29 12:24:02 +02:00
Muhammad Safwan Karim
b8edc25177 Fix comment for adding Stakater Helm repository (#1004) 2025-08-25 18:58:32 +05:00
Muhammad Safwan Karim
f9d658d3b4 Clarify PR process for Helm chart version updates (#1003)
Updated instructions for creating a PR to include a specific label for Helm chart version updates.
2025-08-25 18:50:27 +05:00
Muhammad Safwan Karim
816ad6d430 bump image tag and chart version (#1002) 2025-08-25 17:53:22 +05:00
M Ahmad Mujtaba
19a76258d0 Merge pull request #998 from stakater/fix-broken-test
fixed broken test
2025-08-22 15:22:40 +05:00
Felix Tonnvik
aa481d9568 fixed broken test 2025-08-19 11:06:08 +02:00
Muhammad Safwan Karim
177d2756a8 Moved some functions to common package (#988)
* separate methods

* basic refactoring

* moved common code to util package to use it in gateway

* common check for argo rollouts

* made code compilable with latest changes on master

* Moved options to separate package and created CommandLineOptions instance that will be in sync with options values.

* reverted extra changes

* initialize CommandLineOptions with default options in module init

* wait for paused at annotation before checking deployment paused

* moved things around to fix things

* reverted unnecessary changes

* reverted rolling_upgrade changes

* reverted extra change

* additional checks in reloader

* refactor: ShouldReloadInternal method. It will be called by Reloader
ShouldReload has some additional resource/namespace filter checks which are not needed for Reloader

* added test cases

* moved config to sharable packae

* moved resource selector and label selctor methods

* fixed pipeline

* removed map.yaml

* removed vague comment
2025-08-15 15:51:47 +05:00
Felix
9b2af6f9b7 Merge pull request #989 from praddy26/fix-operator-pod-crash
fix: handle operator crash when using workloadRef
2025-08-15 12:06:09 +02:00
Felix
7c4899a7eb Merge pull request #981 from nitinverma9/add-dnsConfig-support-for-deployment
reloader: Add in support for dnsConfig for pod reloader.
2025-08-15 12:04:30 +02:00
Abdul Aziz
54d44858f8 Update README.md (#993)
* Update README.md

* Update README.md

Updated sponsor badge

* Update README.md

Fix line breaks for lint
2025-08-14 07:39:49 +02:00
Abdul Aziz
6304a9e5ab Update index.md (#995)
Add sponsor link at the bottom
2025-08-14 07:39:29 +02:00
Pradeep Lakshmi Narasimha
1e6a6ec2d9 fix: handle operator crash when using workloadRef 2025-08-14 07:55:56 +05:30
Nitin Verma
42cd7e71a2 reloader: Add in support for dnsConfig for pod reloader. This is supported as a common standard in most public helm charts, so decided to create a PR here. 2025-08-04 07:52:57 +01:00
50 changed files with 978 additions and 351 deletions

View File

@@ -3,5 +3,6 @@
{
"pattern": "^(?!http).+"
}
]
],
"retryOn429": true
}

View File

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4.2.2
uses: actions/checkout@v5.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -57,7 +57,7 @@ jobs:
git diff
- name: Create pull request
uses: peter-evans/create-pull-request@v7.0.6
uses: peter-evans/create-pull-request@v7.0.8
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

View File

@@ -26,7 +26,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
@@ -55,7 +55,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0

View File

@@ -25,7 +25,7 @@ env:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.131
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: README.md
@@ -40,7 +40,7 @@ jobs:
name: Build
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
@@ -57,7 +57,7 @@ jobs:
charts: deployments/kubernetes/chart/reloader
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -80,11 +80,7 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: latest
only-new-issues: false
args: --timeout 10m
run: make lint
- name: Helm Lint
run: |

View File

@@ -16,13 +16,13 @@ on:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.134
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: docs
MD_LINT_CONFIG: .markdownlint.yaml
build:
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.134
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.163
with:
DOCKER_FILE_PATH: Dockerfile-docs
CONTAINER_REGISTRY_URL: ghcr.io/stakater

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -73,7 +73,7 @@ jobs:
exit 1
- name: Install Cosign
uses: sigstore/cosign-installer@v3.8.2
uses: sigstore/cosign-installer@v4.0.0
- name: Login to GHCR Registry
uses: docker/login-action@v3
@@ -106,7 +106,7 @@ jobs:
commit_email: stakater@gmail.com
- name: Push new chart tag
uses: anothrNick/github-tag-action@1.71.0
uses: anothrNick/github-tag-action@1.75.0
env:
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
WITH_V: false

View File

@@ -30,13 +30,13 @@ jobs:
if: ${{ github.event.label.name == 'build-and-push-pr-image' }}
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -47,11 +47,7 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: latest
only-new-issues: false
args: --timeout 10m
run: make lint
- name: Generate Tags
id: generate_tag

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -42,7 +42,7 @@ jobs:
version: v3.11.3
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -53,11 +53,7 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: latest
only-new-issues: false
args: --timeout 10m
run: make lint
- name: Install kubectl
run: |
@@ -215,7 +211,7 @@ jobs:
org.opencontainers.image.revision=${{ github.sha }}
- name: Push Latest Tag
uses: anothrNick/github-tag-action@1.71.0
uses: anothrNick/github-tag-action@1.75.0
env:
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
WITH_V: false

View File

@@ -15,7 +15,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -37,7 +37,7 @@ jobs:
version: v3.11.3
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -48,11 +48,7 @@ jobs:
make install
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: latest
only-new-issues: false
args: --timeout 10m
run: make lint
- name: Install kubectl
run: |

View File

@@ -1,7 +1,7 @@
StylesPath = styles
MinAlertLevel = warning
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.77/Stakater.zip
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.87/Stakater.zip
Vocab = Stakater
# Only check MarkDown files

View File

@@ -1,3 +1,3 @@
# Code of Conduct
Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

View File

@@ -2,7 +2,7 @@ ARG BUILDER_IMAGE
ARG BASE_IMAGE
# Build the manager binary
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.4} AS builder
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.25.3} AS builder
ARG TARGETOS
ARG TARGETARCH

View File

@@ -1,4 +1,4 @@
FROM python:3.13-alpine as builder
FROM python:3.14-alpine as builder
# set workdir
RUN mkdir -p $HOME/application
@@ -17,7 +17,7 @@ RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdoc
# build the docs
RUN mkdocs build
FROM nginxinc/nginx-unprivileged:1.27-alpine as deploy
FROM nginxinc/nginx-unprivileged:1.29-alpine as deploy
COPY --from=builder $HOME/application/site/ /usr/share/nginx/html/reloader/
COPY docs-nginx.conf /etc/nginx/conf.d/default.conf

View File

@@ -41,7 +41,7 @@ YQ ?= $(LOCALBIN)/yq
KUSTOMIZE_VERSION ?= v5.3.0
CONTROLLER_TOOLS_VERSION ?= v0.14.0
ENVTEST_VERSION ?= release-0.17
GOLANGCI_LINT_VERSION ?= v1.57.2
GOLANGCI_LINT_VERSION ?= v2.6.1
YQ_VERSION ?= v4.27.5
YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)"
@@ -75,7 +75,7 @@ $(ENVTEST): $(LOCALBIN)
.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary (ideally with version)
@@ -102,6 +102,9 @@ run:
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
lint: golangci-lint ## Run golangci-lint on the codebase
$(GOLANGCI_LINT) run ./...
build-image:
docker buildx build \
--platform ${OS}/${ARCH} \

View File

@@ -2,6 +2,7 @@
<img src="assets/web/reloader.jpg" alt="Reloader" width="40%"/>
</p>
[![💖 Sponsor Our Work](https://img.shields.io/badge/Sponsor%20Our%20Work-FF8C00?style=flat-square&logo=github-sponsors&logoColor=white)](https://github.com/sponsors/stakater?utm_source=github&utm_medium=readme&utm_campaign=reloader)
[![Go Report Card](https://goreportcard.com/badge/github.com/stakater/reloader?style=flat-square)](https://goreportcard.com/report/github.com/stakater/reloader)
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/stakater/reloader)
[![Release](https://img.shields.io/github/release/stakater/reloader.svg?style=flat-square)](https://github.com/stakater/reloader/releases/latest)
@@ -210,12 +211,13 @@ To enable this feature, update the `reloader.env.secret` section in your `values
```yaml
reloader:
env:
secret:
ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
deployment:
env:
secret:
ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
```
### 7. ⏸️ Pause Deployments
@@ -328,13 +330,30 @@ Reloader supports multiple strategies for triggering rolling updates when a watc
|------|-------------|
| `--resources-to-ignore=configmaps` | Ignore ConfigMaps (only one type can be ignored at a time) |
| `--resources-to-ignore=secrets` | Ignore Secrets (cannot combine with configMaps) |
| `--ignored-workload-types=jobs,cronjobs` | Ignore specific workload types from reload monitoring |
| `--resource-label-selector=key=value` | Only watch ConfigMaps/Secrets with matching labels |
> **⚠️ Note:**
> Only **one** resource type can be ignored at a time.
> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
> **⚠️ Note:**
>
> Only **one** resource type can be ignored at a time.
> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
> ✅ **Workaround:** Scale the Reloader deployment to `0` replicas if you want to disable it completely.
**💡 Workload Type Examples:**
```bash
# Ignore only Jobs
--ignored-workload-types=jobs
# Ignore only CronJobs
--ignored-workload-types=cronjobs
# Ignore both (comma-separated)
--ignored-workload-types=jobs,cronjobs
```
> **🔧 Use Case:** Ignoring workload types is useful when you don't want certain types of workloads to be automatically reloaded.
#### 3. 🧩 Namespace Filtering
| Flag | Description |
@@ -415,11 +434,14 @@ _Repository GitHub releases_: As requested by the community in [issue 685](https
To make a GitHub release:
1. Code owners create a release branch `release-vX.Y.Z`
1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
1. Code owners create a release branch `release-vX.Y.Z` from `master`
1. Code owners run [Init Release](https://github.com/stakater/Reloader/actions/workflows/init-branch-release.yaml) workflow to automatically generate version and manifests on the release branch
- Set the `TARGET_BRANCH` parameter to release branch i.e. `release-vX.Y.Z`
- Set the `TARGET_VERSION` to release version without 'v' i.e. `X.Y.Z`
1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
1. Code owners create a PR to update the Helm chart version, example: [PR-846](https://github.com/stakater/Reloader/pull/846)
1. Code owners create another branch from `master` and bump the helm chart version as well as Reloader image version.
- Code owners create a PR with `release/helm-chart` label, example: [PR-846](https://github.com/stakater/Reloader/pull/846)
_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.

View File

@@ -1 +1 @@
1.1.0
1.4.11

View File

@@ -1,8 +1,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 2.2.0
appVersion: v1.4.6
version: 2.2.5
appVersion: v1.4.10
keywords:
- Reloader
- kubernetes

View File

@@ -5,6 +5,7 @@ If you have configured helm on your cluster, you can add Reloader to helm from o
## Installation
```bash
# Add stakater helm repoository
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update
@@ -14,6 +15,8 @@ helm install stakater/reloader # For helm3 add --generate-name flag or set the r
helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches in all namespaces. To watch in single namespace, set watchGlobally=false
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
helm install stakater/reloader --set reloader.ignoreJobs=true --set reloader.ignoreCronJobs=true --generate-name # Install Reloader ignoring Jobs and CronJobs from reload monitoring
```
## Uninstalling
@@ -47,6 +50,8 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean | `false` |
| `reloader.ignoreSecrets` | To ignore secrets. Valid value are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid value are either `true` or `false` | boolean | `false` |
| `reloader.ignoreJobs` | To ignore jobs from reload monitoring. Valid value are either `true` or `false`. Translates to `--ignored-workload-types=jobs` | boolean | `false` |
| `reloader.ignoreCronJobs` | To ignore CronJobs from reload monitoring. Valid value are either `true` or `false`. Translates to `--ignored-workload-types=cronjobs` | boolean | `false` |
| `reloader.reloadOnCreate` | Enable reload on create events. Valid value are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnDelete` | Enable reload on delete events. Valid value are either `true` or `false` | boolean | `false` |
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid value are either `true` or `false` | boolean | `false` |
@@ -58,7 +63,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
| `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
| `reloader.legacy.rbac` | | boolean | `false` |
| `reloader.matchLabels` | Pod labels to match | map | `{}` |
@@ -87,6 +92,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.deployment.volumeMounts` | Mount volume | array | `[]` |
| `reloader.deployment.volumes` | Add volume to a pod | array | `[]` |
| `reloader.deployment.dnsConfig` | dns configuration for pods | map | `{}` |
### Other Reloader Parameters
| Parameter | Description | Type | Default |
@@ -99,6 +105,8 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.podMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics | boolean | `false` |
| `reloader.podDisruptionBudget.enabled` | Limit the number of pods of a replicated application | boolean | `false` |
| `reloader.netpol.enabled` | | boolean | `false` |
| `reloader.volumeMounts` | Mount volume | array | `[]` |
| `reloader.volumes` | Add volume to a pod | array | `[]` |
| `reloader.webhookUrl` | Add webhook to Reloader | string | `""` |
## ⚙️ Helm Chart Configuration Notes
@@ -112,6 +120,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
- Only one of these resources can be ignored at a time:
- `ignoreConfigMaps` **or** `ignoreSecrets`
- Trying to ignore both will cause Helm template compilation errors
- The `ignoreJobs` and `ignoreCronJobs` flags can be used together or individually
- When both are enabled, translates to `--ignored-workload-types=jobs,cronjobs`
- When used individually, translates to `--ignored-workload-types=jobs` or `--ignored-workload-types=cronjobs`
- These flags prevent Reloader from monitoring and reloading the specified workload types
### Special Integrations
- OpenShift (`DeploymentConfig`) and Argo Rollouts support must be **explicitly enabled**
@@ -120,7 +132,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
### OpenShift Considerations
- Recent OpenShift versions (tested on 4.13.3) require:
- Users to be in a dynamically assigned UID range
- **Solution**: Unset `runAsUser` via `deployment.securityContext.runAsUser=null`
- **Solution**: Unset `runAsUser` via `reloader.deployment.securityContext.runAsUser=null`
- Let OpenShift assign UID automatically during installation
### Core Functionality Flags

View File

@@ -71,6 +71,10 @@ spec:
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
{{- with .Values.reloader.deployment.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
{{- if .Values.global.imageRegistry }}
- image: "{{ .Values.global.imageRegistry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
@@ -151,7 +155,7 @@ spec:
- name: RELOADER_DEPLOYMENT_NAME
value: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
@@ -206,7 +210,7 @@ spec:
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -220,6 +224,13 @@ spec:
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if and (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) }}
- "--ignored-workload-types=jobs,cronjobs"
{{- else if .Values.reloader.ignoreJobs }}
- "--ignored-workload-types=jobs"
{{- else if .Values.reloader.ignoreCronJobs }}
- "--ignored-workload-types=cronjobs"
{{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
@@ -269,7 +280,7 @@ spec:
- "{{ .Values.reloader.custom_annotations.pausePeriod }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.pauseTime }}
- "--pause-deployment-annotation"
- "--pause-deployment-time-annotation"
- "{{ .Values.reloader.custom_annotations.pauseTime }}"
{{- end }}
{{- if .Values.reloader.webhookUrl }}

View File

@@ -61,3 +61,44 @@ tests:
valueFrom:
fieldRef:
fieldPath: metadata.name
- it: sets ignored-workload-types argument when ignoreJobs is true
set:
reloader:
ignoreJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs"
- it: sets ignored-workload-types argument when ignoreCronJobs is true
set:
reloader:
ignoreCronJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=cronjobs"
- it: sets ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are true
set:
reloader:
ignoreJobs: true
ignoreCronJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs,cronjobs"
- it: does not set ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are false
set:
reloader:
ignoreJobs: false
ignoreCronJobs: false
asserts:
- notContains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs"
- notContains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=cronjobs"

View File

@@ -17,7 +17,7 @@ fullnameOverride: ""
image:
name: stakater/reloader
repository: ghcr.io/stakater/reloader
tag: v1.4.6
tag: v1.4.10
# digest: sha256:1234567
pullPolicy: IfNotPresent
@@ -27,7 +27,11 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
# Set to true to exclude Job workloads from automatic reload monitoring
# Useful when you don't want Jobs to be restarted when their referenced ConfigMaps/Secrets change
ignoreJobs: false
# Set to true to exclude CronJob workloads from automatic reload monitoring
# Useful when you don't want CronJobs to be restarted when their referenced ConfigMaps/Secrets change
ignoreCronJobs: false
reloadOnCreate: false
reloadOnDelete: false
@@ -53,6 +57,19 @@ reloader:
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
# Specifies the deployment DNS configuration.
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "1"
# - name: attempts
# value: "3"
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1
@@ -71,7 +88,7 @@ reloader:
# - key: "node-role.kubernetes.io/infra-worker"
# operator: "Exists"
affinity: {}
volumeMounts: []
volumes: []
@@ -113,7 +130,7 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v1.4.6
version: v1.4.10
# Support for extra environment variables.
env:
# Open supports Key value pair as environment variables.

View File

@@ -17,7 +17,7 @@ spec:
app: reloader-reloader
spec:
containers:
- image: "ghcr.io/stakater/reloader:v1.1.0"
- image: "ghcr.io/stakater/reloader:v1.4.11"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:

View File

@@ -141,7 +141,7 @@ spec:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
image: ghcr.io/stakater/reloader:latest
image: ghcr.io/stakater/reloader:v1.4.11
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -10,3 +10,17 @@ These are the key features of Reloader:
1. Restart pod in a `rollout` on change in linked/related `ConfigMaps` or `Secrets`
This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md).
---
<div align="center">
[![💖 Sponsor our work](https://img.shields.io/badge/Sponsor%20Our%20Work-FF8C00?style=for-the-badge&logo=github-sponsors&logoColor=white)](https://github.com/sponsors/stakater?utm_source=docs&utm_medium=footer&utm_campaign=reloader)
<p>
Your support funds maintenance, security updates, and new features for Reloader, plus continued investment in other open source tools.
</p>
</div>
---

go.mod (10 changed lines)
View File

@@ -1,21 +1,21 @@
module github.com/stakater/Reloader
go 1.24.4
go 1.25.3
require (
github.com/argoproj/argo-rollouts v1.8.2
github.com/argoproj/argo-rollouts v1.8.3
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2
github.com/parnurzeal/gorequest v0.3.0
github.com/prometheus/client_golang v1.22.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/spf13/cobra v1.10.1
github.com/stretchr/testify v1.10.0
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.32.3
k8s.io/kubectl v0.32.3
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
)
require (
@@ -50,7 +50,7 @@ require (
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.29.0 // indirect

go.sum (16 changed lines)
View File

@@ -1,5 +1,5 @@
github.com/argoproj/argo-rollouts v1.8.2 h1:DBvkYvFTEH/zJ9MxJerqz/NMWEgZcHY5vxztyCBS5ak=
github.com/argoproj/argo-rollouts v1.8.2/go.mod h1:xZIw+dg+B4IqMv5fNPenIBUiPb9xljL2st1xxkjhaC0=
github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA=
github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -104,10 +104,10 @@ github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -185,8 +185,8 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=

View File

@@ -9,6 +9,15 @@ import (
"github.com/sirupsen/logrus"
)
type AlertSink string
const (
AlertSinkSlack AlertSink = "slack"
AlertSinkTeams AlertSink = "teams"
AlertSinkGoogleChat AlertSink = "gchat"
AlertSinkRaw AlertSink = "raw"
)
// function to send alert msg to webhook service
func SendWebhookAlert(msg string) {
webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
@@ -31,14 +40,15 @@ func SendWebhookAlert(msg string) {
msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
}
if alert_sink == "slack" {
switch AlertSink(alert_sink) {
case AlertSinkSlack:
sendSlackAlert(webhook_url, webhook_proxy, msg)
} else if alert_sink == "teams" {
case AlertSinkTeams:
sendTeamsAlert(webhook_url, webhook_proxy, msg)
} else if alert_sink == "gchat" {
case AlertSinkGoogleChat:
sendGoogleChatAlert(webhook_url, webhook_proxy, msg)
} else {
msg = strings.Replace(msg, "*", "", -1)
default:
msg = strings.ReplaceAll(msg, "*", "")
sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
}
}
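The hunk above replaces an if/else chain over raw strings with a typed `AlertSink` string constant and a `switch`, and swaps `strings.Replace(msg, "*", "", -1)` for the equivalent `strings.ReplaceAll`. A minimal, self-contained sketch of that dispatch pattern follows; the `dispatch` helper and its print statements are hypothetical stand-ins, not Reloader's actual webhook senders.

```go
package main

import (
	"fmt"
	"os"
)

// AlertSink mirrors the typed string introduced in the diff above.
type AlertSink string

const (
	AlertSinkSlack AlertSink = "slack"
	AlertSinkTeams AlertSink = "teams"
	AlertSinkRaw   AlertSink = "raw"
)

// dispatch is a hypothetical stand-in for the real alert senders.
func dispatch(sink AlertSink, msg string) {
	switch sink {
	case AlertSinkSlack:
		fmt.Println("-> slack:", msg)
	case AlertSinkTeams:
		fmt.Println("-> teams:", msg)
	default:
		// An unknown or empty sink falls back to the raw webhook path.
		fmt.Println("-> raw webhook:", msg)
	}
}

func main() {
	// As in the diff, the sink name comes from an environment variable.
	dispatch(AlertSink(os.Getenv("ALERT_SINK")), "ConfigMap example updated")
}
```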

View File

@@ -83,9 +83,9 @@ func GetDeploymentItem(clients kube.Clients, name string, namespace string) (run
return nil, err
}
if deployment.Spec.Template.ObjectMeta.Annotations == nil {
if deployment.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
deployment.Spec.Template.ObjectMeta.Annotations = annotations
deployment.Spec.Template.Annotations = annotations
}
return deployment, nil
@@ -101,9 +101,9 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
items := make([]runtime.Object, len(deployments.Items))
// Ensure we always have pod annotations to add to
for i, v := range deployments.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
if v.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
deployments.Items[i].Spec.Template.Annotations = annotations
}
items[i] = &deployments.Items[i]
}
@@ -132,9 +132,9 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(cronjobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range cronjobs.Items {
if v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
if v.Spec.JobTemplate.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
cronjobs.Items[i].Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
cronjobs.Items[i].Spec.JobTemplate.Spec.Template.Annotations = annotations
}
items[i] = &cronjobs.Items[i]
}
@@ -163,9 +163,9 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(jobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range jobs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
if v.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
jobs.Items[i].Spec.Template.Annotations = annotations
}
items[i] = &jobs.Items[i]
}
@@ -194,8 +194,8 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
items := make([]runtime.Object, len(daemonSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range daemonSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if v.Spec.Template.Annotations == nil {
daemonSets.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &daemonSets.Items[i]
}
@@ -224,8 +224,8 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
items := make([]runtime.Object, len(statefulSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range statefulSets.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if v.Spec.Template.Annotations == nil {
statefulSets.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &statefulSets.Items[i]
}
@@ -254,8 +254,8 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(rollouts.Items))
// Ensure we always have pod annotations to add to
for i, v := range rollouts.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if v.Spec.Template.Annotations == nil {
rollouts.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &rollouts.Items[i]
}
@@ -265,98 +265,98 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.Deployment).ObjectMeta.Annotations == nil {
item.(*appsv1.Deployment).ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.Deployment).Annotations == nil {
item.(*appsv1.Deployment).Annotations = make(map[string]string)
}
return item.(*appsv1.Deployment).ObjectMeta.Annotations
return item.(*appsv1.Deployment).Annotations
}
// GetCronJobAnnotations returns the annotations of given cronjob
func GetCronJobAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.CronJob).ObjectMeta.Annotations == nil {
item.(*batchv1.CronJob).ObjectMeta.Annotations = make(map[string]string)
if item.(*batchv1.CronJob).Annotations == nil {
item.(*batchv1.CronJob).Annotations = make(map[string]string)
}
return item.(*batchv1.CronJob).ObjectMeta.Annotations
return item.(*batchv1.CronJob).Annotations
}
// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.Job).ObjectMeta.Annotations == nil {
item.(*batchv1.Job).ObjectMeta.Annotations = make(map[string]string)
if item.(*batchv1.Job).Annotations == nil {
item.(*batchv1.Job).Annotations = make(map[string]string)
}
return item.(*batchv1.Job).ObjectMeta.Annotations
return item.(*batchv1.Job).Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.DaemonSet).ObjectMeta.Annotations == nil {
item.(*appsv1.DaemonSet).ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.DaemonSet).Annotations == nil {
item.(*appsv1.DaemonSet).Annotations = make(map[string]string)
}
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
return item.(*appsv1.DaemonSet).Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.StatefulSet).ObjectMeta.Annotations == nil {
item.(*appsv1.StatefulSet).ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.StatefulSet).Annotations == nil {
item.(*appsv1.StatefulSet).Annotations = make(map[string]string)
}
return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
return item.(*appsv1.StatefulSet).Annotations
}
// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item runtime.Object) map[string]string {
if item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations = make(map[string]string)
if item.(*argorolloutv1alpha1.Rollout).Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string)
}
return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
return item.(*argorolloutv1alpha1.Rollout).Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.Deployment).Spec.Template.Annotations == nil {
item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string)
}
return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
return item.(*appsv1.Deployment).Spec.Template.Annotations
}
// GetCronJobPodAnnotations returns the pod's annotations of given cronjob
func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil {
item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string)
}
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations
}
// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations == nil {
item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*batchv1.Job).Spec.Template.Annotations == nil {
item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string)
}
return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
return item.(*batchv1.Job).Spec.Template.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil {
item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string)
}
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
return item.(*appsv1.DaemonSet).Spec.Template.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil {
item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string)
}
return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
return item.(*appsv1.StatefulSet).Spec.Template.Annotations
}
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
if item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
if item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = make(map[string]string)
}
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations
}
// GetDeploymentContainers returns the containers of given deployment
@@ -481,9 +481,9 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
}
// Remove fields that should not be specified when creating a new Job
job.ObjectMeta.ResourceVersion = ""
job.ObjectMeta.UID = ""
job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
job.ResourceVersion = ""
job.UID = ""
job.CreationTimestamp = meta_v1.Time{}
job.Status = batchv1.JobStatus{}
// Remove problematic labels
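Most of the churn in this file is mechanical: the Kubernetes API types embed `metav1.ObjectMeta` (and pod templates embed it via `PodTemplateSpec`), so the explicit `.ObjectMeta` selector is redundant and the promoted field can be accessed directly; `deployment.Annotations` and `deployment.ObjectMeta.Annotations` refer to the same map. A minimal sketch of Go field promotion, using placeholder types rather than the real Kubernetes structs:

```go
package main

import "fmt"

// Meta is a stand-in for metav1.ObjectMeta in this sketch.
type Meta struct {
	Annotations map[string]string
}

// Deployment embeds Meta the way the API types embed ObjectMeta,
// so Meta's fields are promoted onto Deployment itself.
type Deployment struct {
	Meta
}

func main() {
	d := Deployment{}
	if d.Annotations == nil { // identical to d.Meta.Annotations
		d.Annotations = make(map[string]string)
	}
	d.Annotations["example-key"] = "example-value"
	fmt.Println(d.Meta.Annotations) // map[example-key:example-value]
}
```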

View File

@@ -373,19 +373,19 @@ func TestPatchResources(t *testing.T) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).ObjectMeta.Annotations["test"])
assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).Annotations["test"])
}},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).ObjectMeta.Annotations["test"])
assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).Annotations["test"])
}},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetStatefulSetItem(clients, "test-statefulset", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).ObjectMeta.Annotations["test"])
assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).Annotations["test"])
}},
{"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) {
assert.EqualError(t, err, "not supported patching: CronJob")
@@ -621,17 +621,17 @@ func deleteTestStatefulSets(clients kube.Clients, namespace string) error {
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.Template.Annotations = annotations
case *appsv1.DaemonSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.Template.Annotations = annotations
case *appsv1.StatefulSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.Template.Annotations = annotations
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.JobTemplate.Spec.Template.Annotations = annotations
case *batchv1.Job:
v.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.Template.Annotations = annotations
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.ObjectMeta.Annotations = annotations
v.Spec.Template.Annotations = annotations
}
return obj
}

View File

@@ -133,13 +133,13 @@ func startReloader(cmd *cobra.Command, args []string) {
namespaceLabelSelector := ""
if isGlobal {
namespaceLabelSelector, err = util.GetNamespaceLabelSelector()
namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors)
if err != nil {
logrus.Fatal(err)
}
}
resourceLabelSelector, err := util.GetResourceLabelSelector()
resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors)
if err != nil {
logrus.Fatal(err)
}

View File

@@ -124,9 +124,9 @@ func (c *Controller) Add(obj interface{}) {
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
switch object := raw.(type) {
case *v1.ConfigMap:
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
return c.ignoredNamespaces.Contains(object.Namespace)
case *v1.Secret:
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
return c.ignoredNamespaces.Contains(object.Namespace)
}
return false
}
@@ -212,7 +212,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
@@ -226,9 +226,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
func (c *Controller) runWorker() {
// At this point the controller is fully initialized and we can start processing the resources
if c.resource == "secrets" {
if c.resource == string(v1.ResourceSecrets) {
secretControllerInitialized = true
} else if c.resource == "configMaps" {
} else if c.resource == string(v1.ResourceConfigMaps) {
configmapControllerInitialized = true
}
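The initialization check now compares the controller's resource name against the core v1 constants rather than hand-written literals. `v1.ResourceSecrets` and `v1.ResourceConfigMaps` are the lowercase plurals `"secrets"` and `"configmaps"`, so the previous camelCase literal `"configMaps"` never matched — which appears to be the bug described by PR #1061 in the commit list above (the ConfigMap controller could not mark itself as initialized). A minimal sketch, assuming the `k8s.io/api` dependency already present in `go.mod`:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// The canonical resource names are lowercase plurals.
	fmt.Println(v1.ResourceSecrets)    // secrets
	fmt.Println(v1.ResourceConfigMaps) // configmaps

	// The pre-fix comparison used a camelCase literal, so it never matched.
	fmt.Println("configMaps" == string(v1.ResourceConfigMaps)) // false
}
```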

View File

@@ -15,6 +15,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -94,7 +95,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -147,7 +148,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -212,7 +213,7 @@ func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -271,7 +272,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -326,7 +327,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationIn
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -390,7 +391,7 @@ func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -443,7 +444,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -501,7 +502,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -552,7 +553,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -604,7 +605,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *test
// Verifying DaemonSet update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -667,7 +668,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.
// Verifying DaemonSet update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -720,7 +721,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -779,7 +780,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -830,7 +831,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDae
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -882,7 +883,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *te
// Verifying StatefulSet update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -941,7 +942,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testin
// Verifying StatefulSet update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -994,7 +995,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1046,7 +1047,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1099,7 +1100,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1164,7 +1165,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1223,7 +1224,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing
// Verifying deployment update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1278,7 +1279,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1342,7 +1343,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1395,7 +1396,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1453,7 +1454,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1504,7 +1505,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1556,7 +1557,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1619,7 +1620,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1672,7 +1673,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1731,7 +1732,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1782,7 +1783,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1834,7 +1835,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
// Verifying StatefulSet update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1893,7 +1894,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testin
// Verifying StatefulSet update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1946,7 +1947,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2004,7 +2005,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2062,7 +2063,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testi
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
config := util.Config{
config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2340,7 +2341,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
indexer: tt.fields.indexer,
queue: tt.fields.queue,
informer: tt.fields.informer,
namespace: tt.fields.namespace.ObjectMeta.Name,
namespace: tt.fields.namespace.Name,
namespaceSelector: tt.fields.namespaceSelector,
}

View File

@@ -4,7 +4,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
@@ -33,13 +33,13 @@ func (r ResourceCreatedHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
func (r ResourceCreatedHandler) GetConfig() (common.Config, string) {
var oldSHAData string
var config util.Config
var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
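
For readers skimming the handler changes: the helpers that build the Config now live in pkg/common, and GetConfig simply dispatches on the concrete type it received. A compact restatement of that dispatch as a type switch (the function name is invented for illustration; it assumes the imports from the hunk above plus k8s.io/api/core/v1):

// configFor is illustrative only; it mirrors the dispatch in GetConfig
// above using a type switch instead of two separate type assertions.
func configFor(obj interface{}) (common.Config, bool) {
    switch res := obj.(type) {
    case *v1.ConfigMap:
        return common.GetConfigmapConfig(res), true
    case *v1.Secret:
        return common.GetSecretConfig(res), true
    default:
        // corresponds to the "Invalid resource" warning in the handler
        return common.Config{}, false
    }
}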

View File

@@ -10,7 +10,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -42,20 +42,20 @@ func (r ResourceDeleteHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
func (r ResourceDeleteHandler) GetConfig() (common.Config, string) {
var oldSHAData string
var config util.Config
var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
return config, oldSHAData
}
func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return removePodAnnotations(upgradeFuncs, item, config, autoReload)
}
@@ -63,12 +63,12 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
config.SHAValue = testutil.GetSHAfromEmptyData()
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

View File

@@ -1,11 +1,9 @@
package handler
import (
"github.com/stakater/Reloader/internal/pkg/util"
)
import "github.com/stakater/Reloader/pkg/common"
// ResourceHandler handles the creation and update of resources
type ResourceHandler interface {
Handle() error
GetConfig() (util.Config, string)
GetConfig() (common.Config, string)
}
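
Only the import and the GetConfig return type change here. As a quick sanity check of what now satisfies the interface, a throwaway no-op implementation (names hypothetical, assuming the pkg/common import above):

// noopHandler is a hypothetical stand-in showing the updated contract.
type noopHandler struct{}

func (noopHandler) Handle() error { return nil }

func (noopHandler) GetConfig() (common.Config, string) {
    return common.Config{}, ""
}

// Compile-time assertion that the stand-in satisfies ResourceHandler.
var _ ResourceHandler = noopHandler{}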

View File

@@ -9,7 +9,6 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/pkg/kube"
"github.com/stretchr/testify/assert"
app "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -374,14 +373,14 @@ func TestPauseDeployment(t *testing.T) {
}
// Simple helper function for test cases
func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*app.Deployment, error) {
func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*appsv1.Deployment, error) {
for _, deployment := range deployments {
accessor, err := meta.Accessor(deployment)
if err != nil {
return nil, fmt.Errorf("error getting accessor for item: %v", err)
}
if accessor.GetName() == deploymentName {
deploymentObj, ok := deployment.(*app.Deployment)
deploymentObj, ok := deployment.(*appsv1.Deployment)
if !ok {
return nil, fmt.Errorf("failed to cast to Deployment")
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
@@ -36,15 +37,15 @@ func (r ResourceUpdatedHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
var oldSHAData string
var config util.Config
var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
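
GetConfig hands back both the Config built from the new object and the SHA of the old one. The Handle body is not part of this diff, so the following is only a hedged illustration of what that pair of values enables, not a claim about the actual implementation:

// Hedged sketch: comparing the old SHA against the freshly computed one
// is what would let a caller skip reloads when the data did not change.
cfg, oldSHA := handler.GetConfig() // handler: some ResourceUpdatedHandler
if cfg.SHAValue != oldSHA {
    // data differs: this is the case where a rolling upgrade is worth considering
}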

View File

@@ -138,7 +138,7 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
}
}
func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
func sendUpgradeWebhook(config common.Config, webhookUrl string) error {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
config.ResourceName, config.Type, config.Namespace, webhookUrl)
@@ -160,7 +160,12 @@ func sendWebhook(url string) (string, []error) {
// the reloader seems to retry automatically so no retry logic added
return "", err
}
defer resp.Body.Close()
defer func() {
closeErr := resp.Body.Close()
if closeErr != nil {
logrus.Error(closeErr)
}
}()
var buffer bytes.Buffer
_, bufferErr := io.Copy(&buffer, resp.Body)
if bufferErr != nil {
@@ -169,21 +174,37 @@ func sendWebhook(url string) (string, []error) {
return buffer.String(), nil
}
func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
clients := kube.GetClients()
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
// Get ignored workload types to avoid listing resources without RBAC permissions
ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
if err != nil {
logrus.Errorf("Failed to parse ignored workload types: %v", err)
ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails
}
err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
// Only process CronJobs if they are not ignored
if !ignoredWorkloadTypes.Contains("cronjobs") {
err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
}
err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
// Only process Jobs if they are not ignored
if !ignoredWorkloadTypes.Contains("jobs") {
err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
@@ -203,7 +224,7 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
return nil
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
@@ -212,7 +233,7 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
}
// PerformAction invokes the deployment if there is any change in configmap or secret data
func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, item := range items {
@@ -249,7 +270,7 @@ func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
return err
}
func upgradeResource(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
accessor, err := meta.Accessor(resource)
if err != nil {
return err
@@ -334,7 +355,8 @@ func upgradeResource(clients kube.Clients, config util.Config, upgradeFuncs call
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
for i := range volumes {
if mountType == constants.ConfigmapEnvVarPostfix {
switch mountType {
case constants.ConfigmapEnvVarPostfix:
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
}
@@ -346,7 +368,7 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
}
}
}
} else if mountType == constants.SecretEnvVarPostfix {
case constants.SecretEnvVarPostfix:
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
}
@@ -383,9 +405,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
for j := range envs {
envVarSource := envs[j].ValueFrom
if envVarSource != nil {
if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.Name == resourceName {
return &containers[i]
} else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
} else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.Name == resourceName {
return &containers[i]
}
}
@@ -393,9 +415,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
envsFrom := containers[i].EnvFrom
for j := range envsFrom {
if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.Name == resourceName {
return &containers[i]
} else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
} else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.Name == resourceName {
return &containers[i]
}
}
@@ -403,7 +425,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
return nil
}
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container {
volumes := upgradeFuncs.VolumesFunc(item)
containers := upgradeFuncs.ContainersFunc(item)
initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -417,7 +439,11 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
container = getContainerWithVolumeMount(initContainers, volumeMountName)
if container != nil {
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
return &containers[0]
if len(containers) > 0 {
return &containers[0]
}
// No containers available, return nil to avoid crash
return nil
}
} else if container != nil {
return container
@@ -430,13 +456,21 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
if container != nil {
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
return &containers[0]
if len(containers) > 0 {
return &containers[0]
}
// No containers available, return nil to avoid crash
return nil
}
}
// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
if container == nil && !autoReload {
return &containers[0]
if len(containers) > 0 {
return &containers[0]
}
// No containers available, return nil to avoid crash
return nil
}
return container
@@ -452,16 +486,16 @@ type InvokeStrategyResult struct {
Patch *Patch
}
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return InvokeStrategyResult{constants.NoContainerFound, nil}
@@ -469,7 +503,7 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
// Note: the data on this struct is purely informational and is not used for future updates
reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name})
annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
if err != nil {
logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
@@ -496,7 +530,7 @@ func getReloaderAnnotationKey() string {
)
}
func createReloadedAnnotations(target *util.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
if target == nil {
return nil, nil, errors.New("target is required")
}
@@ -531,7 +565,7 @@ func getEnvVarName(resourceName string, typeName string) string {
return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
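
The recurring guard added to getContainerUsingResource protects the three places that previously returned &containers[0] unconditionally: when a workload reports no containers (the Argo Rollout workloadRef case exercised by the new test further down), the function now returns nil instead of panicking with an index-out-of-range error. A stripped-down sketch of the same pattern (helper name invented; v1 is k8s.io/api/core/v1):

// firstContainerOrNil is illustrative only: it captures the guard used
// above, returning nil rather than indexing into an empty slice.
func firstContainerOrNil(containers []v1.Container) *v1.Container {
    if len(containers) > 0 {
        return &containers[0]
    }
    // No containers available (e.g. a Rollout delegating its pod template
    // via workloadRef); callers treat nil as "no container found".
    return nil
}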

View File

@@ -7,6 +7,7 @@ import (
"testing"
"time"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/sirupsen/logrus"
@@ -16,8 +17,10 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -672,7 +675,7 @@ func teardownArs() {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
// Deleting Deployment with pasuse annotation
// Deleting Deployment with pause annotation
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
@@ -708,7 +711,7 @@ func teardownArs() {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Configmap used projected volume in init containers
// Deleting secret used in projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
@@ -1392,7 +1395,7 @@ func teardownErs() {
logrus.Errorf("Error while deleting the configmap %v", err)
}
// Deleting Configmap used projected volume in init containers
// Deleting secret used in projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
@@ -1475,7 +1478,7 @@ func teardownErs() {
logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err)
}
// Deleting ConfigMap for testins pausing deployments
// Deleting ConfigMap for testing pausing deployments
err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
if err != nil {
logrus.Errorf("Error while deleting the configmap: %v", err)
@@ -1486,13 +1489,13 @@ func teardownErs() {
}
func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) util.Config {
func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config {
ns := ersNamespace
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
ns = arsNamespace
}
return util.Config{
return common.Config{
Namespace: ns,
ResourceName: name,
SHAValue: shaData,
@@ -1509,7 +1512,7 @@ func getCollectors() metrics.Collectors {
var labelSucceeded = prometheus.Labels{"success": "true"}
var labelFailed = prometheus.Labels{"success": "false"}
func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -1527,7 +1530,7 @@ func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Client
}
}
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
@@ -2247,7 +2250,7 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testi
logrus.Infof("Verifying deployment did not update")
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
if updated {
t.Errorf("Deployment which had to be exluded was updated")
t.Errorf("Deployment which had to be excluded was updated")
}
}
@@ -2907,7 +2910,7 @@ func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) {
}
}
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -2924,7 +2927,7 @@ func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Client
}
}
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate)
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
@@ -4214,3 +4217,72 @@ func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs c
return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName)
}
// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers
// This simulates the scenario where Argo Rollouts with workloadRef return empty containers
func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object {
rollout := &argorolloutv1alpha1.Rollout{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: argorolloutv1alpha1.RolloutSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{}, // Empty containers slice
InitContainers: []v1.Container{}, // Empty init containers slice
Volumes: []v1.Volume{}, // Empty volumes slice
},
},
},
}
var obj runtime.Object = rollout
return &obj
}
// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions
func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) {
namespace := "test-namespace"
resourceName := "test-configmap"
// Use real Argo Rollout functions but mock the containers function
rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs()
originalContainersFunc := rolloutFuncs.ContainersFunc
originalInitContainersFunc := rolloutFuncs.InitContainersFunc
// Override to return empty containers (simulating workloadRef scenario)
rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container {
return []v1.Container{} // Empty like workloadRef rollouts
}
rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container {
return []v1.Container{} // Empty like workloadRef rollouts
}
// Restore original functions after test
defer func() {
rolloutFuncs.ContainersFunc = originalContainersFunc
rolloutFuncs.InitContainersFunc = originalInitContainersFunc
}()
// Use proper Argo Rollout object instead of Pod
mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout")
config := common.Config{
Namespace: namespace,
ResourceName: resourceName,
Type: constants.ConfigmapEnvVarPostfix,
SHAValue: "test-sha",
}
// Test both autoReload scenarios using subtests as suggested by Felix
for _, autoReload := range []bool{true, false} {
t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) {
// This tests the actual fix in the context of Argo Rollouts
result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload)
if result != nil {
t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result)
}
})
}
}

View File

@@ -16,7 +16,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
)
@@ -159,7 +159,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying pod envvars has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
config := util.Config{
config := common.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -186,7 +186,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
// Verifying that the deployment was not updated as leadership has been lost
logrus.Infof("Verifying pod envvars has not been updated")
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
config = util.Config{
config = common.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,

View File

@@ -65,6 +65,8 @@ var (
WebhookUrl = ""
// ResourcesToIgnore is a list of resources to ignore when watching for changes
ResourcesToIgnore = []string{}
// WorkloadTypesToIgnore is a list of workload types to ignore when watching for changes
WorkloadTypesToIgnore = []string{}
// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
NamespacesToIgnore = []string{}
// NamespaceSelectors is a list of namespace selectors to watch for changes

View File

@@ -21,6 +21,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -93,7 +94,7 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
}
if !(len(annotations) > 0) {
if len(annotations) == 0 {
annotations = map[string]string{
options.ConfigmapUpdateOnChangeAnnotation: name,
options.SecretUpdateOnChangeAnnotation: name}
@@ -478,18 +479,19 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
},
}
if !both {
deployment.ObjectMeta.Annotations = nil
deployment.Annotations = nil
}
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
return deployment
}
func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
replicaset := int32(1)
var objectMeta metav1.ObjectMeta
if resourceType == SecretResourceType {
switch resourceType {
case SecretResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
} else if resourceType == ConfigmapResourceType {
case ConfigmapResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
}
@@ -513,9 +515,10 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string,
annotation := map[string]string{}
if resourceType == SecretResourceType {
switch resourceType {
case SecretResourceType:
annotation[options.SecretExcludeReloaderAnnotation] = deploymentName
} else if resourceType == ConfigmapResourceType {
case ConfigmapResourceType:
annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName
}
@@ -733,7 +736,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
return ""
}
var last util.ReloadSource
var last common.ReloadSource
bytes := []byte(annotationJson)
err := json.Unmarshal(bytes, &last)
if err != nil {
@@ -746,12 +749,13 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
if resourceType == SecretResourceType {
switch resourceType {
case SecretResourceType:
secret := GetSecret(namespace, resourceName, data)
for k, v := range secret.Data {
values = append(values, k+"="+string(v[:]))
}
} else if resourceType == ConfigmapResourceType {
case ConfigmapResourceType:
configmap := GetConfigmap(namespace, resourceName, data)
for k, v := range configmap.Data {
values = append(values, k+"="+v)
@@ -1058,7 +1062,7 @@ func RandSeq(n int) string {
}
// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
@@ -1104,7 +1108,7 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
}
// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all env vars STAKATER_name_CONFIGMAP/SECRET are removed
func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
@@ -1153,7 +1157,7 @@ func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVa
}
// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)

View File

@@ -8,13 +8,11 @@ import (
"sort"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
// ConvertToEnvVarName converts the given text into a usable env var
@@ -85,6 +83,7 @@ func ConfigureReloaderFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)")
cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
@@ -98,68 +97,6 @@ func ConfigureReloaderFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
}
func GetNamespaceLabelSelector() (string, error) {
slice := options.NamespaceSelectors
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
namespaceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(namespaceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return namespaceLabelSelector, nil
}
func GetResourceLabelSelector() (string, error) {
slice := options.ResourceSelectors
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
resourceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(resourceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return resourceLabelSelector, nil
}
func GetIgnoredResourcesList() (List, error) {
ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore")
@@ -176,3 +113,16 @@ func GetIgnoredResourcesList() (List, error) {
return ignoredResourcesList, nil
}
func GetIgnoredWorkloadTypesList() (List, error) {
ignoredWorkloadTypesList := options.WorkloadTypesToIgnore
for _, v := range ignoredWorkloadTypesList {
if v != "jobs" && v != "cronjobs" {
return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v)
}
}
return ignoredWorkloadTypesList, nil
}
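
The new flag accepts only "jobs" and "cronjobs", and GetIgnoredWorkloadTypesList enforces that before the list is consulted in doRollingUpgrade. A short usage sketch, assuming this repository's options and util packages (it mirrors the table-driven tests in the next file):

// Equivalent to starting Reloader with --ignored-workload-types=jobs,cronjobs.
options.WorkloadTypesToIgnore = []string{"jobs", "cronjobs"}
ignored, err := util.GetIgnoredWorkloadTypesList()
if err == nil {
    fmt.Println(ignored.Contains("cronjobs")) // true
}

// Anything else is rejected with a descriptive error.
options.WorkloadTypesToIgnore = []string{"deployments"}
if _, err := util.GetIgnoredWorkloadTypesList(); err != nil {
    fmt.Println(err) // 'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not 'deployments'
}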

View File

@@ -3,6 +3,7 @@ package util
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
)
@@ -45,3 +46,141 @@ func TestGetHashFromConfigMap(t *testing.T) {
}
}
}
func TestGetIgnoredWorkloadTypesList(t *testing.T) {
// Save original state
originalWorkloadTypes := options.WorkloadTypesToIgnore
defer func() {
options.WorkloadTypesToIgnore = originalWorkloadTypes
}()
tests := []struct {
name string
workloadTypes []string
expectError bool
expected []string
}{
{
name: "Both jobs and cronjobs",
workloadTypes: []string{"jobs", "cronjobs"},
expectError: false,
expected: []string{"jobs", "cronjobs"},
},
{
name: "Only jobs",
workloadTypes: []string{"jobs"},
expectError: false,
expected: []string{"jobs"},
},
{
name: "Only cronjobs",
workloadTypes: []string{"cronjobs"},
expectError: false,
expected: []string{"cronjobs"},
},
{
name: "Empty list",
workloadTypes: []string{},
expectError: false,
expected: []string{},
},
{
name: "Invalid workload type",
workloadTypes: []string{"invalid"},
expectError: true,
expected: nil,
},
{
name: "Mixed valid and invalid",
workloadTypes: []string{"jobs", "invalid"},
expectError: true,
expected: nil,
},
{
name: "Duplicate values",
workloadTypes: []string{"jobs", "jobs"},
expectError: false,
expected: []string{"jobs", "jobs"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Set the global option
options.WorkloadTypesToIgnore = tt.workloadTypes
result, err := GetIgnoredWorkloadTypesList()
if tt.expectError && err == nil {
t.Errorf("Expected error but got none")
}
if !tt.expectError && err != nil {
t.Errorf("Expected no error but got: %v", err)
}
if !tt.expectError {
if len(result) != len(tt.expected) {
t.Errorf("Expected %v, got %v", tt.expected, result)
return
}
for i, expected := range tt.expected {
if i >= len(result) || result[i] != expected {
t.Errorf("Expected %v, got %v", tt.expected, result)
break
}
}
}
})
}
}
func TestListContains(t *testing.T) {
tests := []struct {
name string
list List
item string
expected bool
}{
{
name: "List contains item",
list: List{"jobs", "cronjobs"},
item: "jobs",
expected: true,
},
{
name: "List does not contain item",
list: List{"jobs"},
item: "cronjobs",
expected: false,
},
{
name: "Empty list",
list: List{},
item: "jobs",
expected: false,
},
{
name: "Case sensitive matching",
list: List{"jobs", "cronjobs"},
item: "Jobs",
expected: false,
},
{
name: "Multiple occurrences",
list: List{"jobs", "jobs", "cronjobs"},
item: "jobs",
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.list.Contains(tt.item)
if result != tt.expected {
t.Errorf("Expected %v, got %v", tt.expected, result)
}
})
}
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
)
@@ -74,6 +75,8 @@ type ReloaderOptions struct {
WebhookUrl string `json:"webhookUrl"`
// ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
ResourcesToIgnore []string `json:"resourcesToIgnore"`
// WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs")
WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"`
// NamespaceSelectors is a list of label selectors to filter namespaces to watch
NamespaceSelectors []string `json:"namespaceSelectors"`
// ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
@@ -121,11 +124,90 @@ func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
}
}
func ShouldReload(config util.Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
func GetNamespaceLabelSelector(slice []string) (string, error) {
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
if resourceType == "Rollout" && !options.IsArgoRollouts {
return ReloadCheckResult{
ShouldReload: false,
namespaceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(namespaceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return namespaceLabelSelector, nil
}
func GetResourceLabelSelector(slice []string) (string, error) {
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
resourceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(resourceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return resourceLabelSelector, nil
}
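
Both selector helpers normalise the input the same way before handing it to labels.Parse: a legacy "key:value" pair becomes "key=value", and a "*" value collapses to a bare key (an existence requirement). A hedged sketch of that behaviour, assuming the functions live in a pkg/common package as the new test file suggests:

```go
// Sketch only; the import path is an assumption, the conversion rules are the
// ones implemented by GetNamespaceLabelSelector / GetResourceLabelSelector.
package main

import (
	"fmt"

	"github.com/stakater/Reloader/pkg/common"
)

func main() {
	// Note: the helper rewrites the slice in place while normalising it.
	selectors := []string{"team:platform", "env:*", "tier=*", "app=reloader"}

	sel, err := common.GetNamespaceLabelSelector(selectors)
	if err != nil {
		fmt.Println("invalid selector:", err)
		return
	}
	fmt.Println(sel) // "team=platform,env,tier,app=reloader"
}
```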
// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options.
func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
// Check if this workload type should be ignored
if len(options.WorkloadTypesToIgnore) > 0 {
ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
if err != nil {
logrus.Errorf("Failed to parse ignored workload types: %v", err)
} else {
// Map Kubernetes resource types to CLI-friendly names for comparison
var resourceToCheck string
switch resourceType {
case "Job":
resourceToCheck = "jobs"
case "CronJob":
resourceToCheck = "cronjobs"
default:
resourceToCheck = resourceType // For other types, use as-is
}
// Check if current resource type should be ignored
if ignoredWorkloadTypes.Contains(resourceToCheck) {
return ReloadCheckResult{
ShouldReload: false,
}
}
}
}
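
The new guard means a Job or CronJob event is dropped before any annotation evaluation once its type is on the ignore list; the new pkg/common/common_test.go below verifies exactly this. A condensed, hedged sketch of the call (field names mirror those tests; the import path is an assumption):

```go
// Sketch only, condensing what the new tests below assert: with "jobs"
// ignored, ShouldReload short-circuits for Job resources but still processes
// CronJobs.
package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/pkg/common"
)

func main() {
	// ShouldReload reads the global option via util.GetIgnoredWorkloadTypesList,
	// so it is set alongside the ReloaderOptions field, as the tests do.
	options.WorkloadTypesToIgnore = []string{"jobs"}

	config := common.Config{
		ResourceName: "app-config",
		Annotation:   "configmap.reloader.stakater.com/reload",
	}
	annotations := common.Map{"configmap.reloader.stakater.com/reload": "app-config"}
	opts := &common.ReloaderOptions{
		WorkloadTypesToIgnore:  []string{"jobs"},
		AutoReloadAll:          true,
		ReloaderAutoAnnotation: "reloader.stakater.com/auto",
	}

	fmt.Println(common.ShouldReload(config, "Job", annotations, common.Map{}, opts).ShouldReload)     // false
	fmt.Println(common.ShouldReload(config, "CronJob", annotations, common.Map{}, opts).ShouldReload) // true
}
```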
@@ -251,6 +333,7 @@ func GetCommandLineOptions() *ReloaderOptions {
CommandLineOptions.EnableHA = options.EnableHA
CommandLineOptions.WebhookUrl = options.WebhookUrl
CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore
CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
CommandLineOptions.ResourceSelectors = options.ResourceSelectors
CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
@@ -258,6 +341,7 @@ func GetCommandLineOptions() *ReloaderOptions {
CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
CommandLineOptions.EnablePProf = options.EnablePProf
CommandLineOptions.PProfAddr = options.PProfAddr
return CommandLineOptions
}

224
pkg/common/common_test.go Normal file
View File

@@ -0,0 +1,224 @@
package common
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/options"
)
func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) {
// Save original state
originalWorkloadTypes := options.WorkloadTypesToIgnore
defer func() {
options.WorkloadTypesToIgnore = originalWorkloadTypes
}()
tests := []struct {
name string
ignoredWorkloadTypes []string
resourceType string
shouldReload bool
description string
}{
{
name: "Jobs ignored - Job should not reload",
ignoredWorkloadTypes: []string{"jobs"},
resourceType: "Job",
shouldReload: false,
description: "When jobs are ignored, Job resources should not be reloaded",
},
{
name: "Jobs ignored - CronJob should reload",
ignoredWorkloadTypes: []string{"jobs"},
resourceType: "CronJob",
shouldReload: true,
description: "When jobs are ignored, CronJob resources should still be processed",
},
{
name: "CronJobs ignored - CronJob should not reload",
ignoredWorkloadTypes: []string{"cronjobs"},
resourceType: "CronJob",
shouldReload: false,
description: "When cronjobs are ignored, CronJob resources should not be reloaded",
},
{
name: "CronJobs ignored - Job should reload",
ignoredWorkloadTypes: []string{"cronjobs"},
resourceType: "Job",
shouldReload: true,
description: "When cronjobs are ignored, Job resources should still be processed",
},
{
name: "Both ignored - Job should not reload",
ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
resourceType: "Job",
shouldReload: false,
description: "When both are ignored, Job resources should not be reloaded",
},
{
name: "Both ignored - CronJob should not reload",
ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
resourceType: "CronJob",
shouldReload: false,
description: "When both are ignored, CronJob resources should not be reloaded",
},
{
name: "Both ignored - Deployment should reload",
ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
resourceType: "Deployment",
shouldReload: true,
description: "When both are ignored, other workload types should still be processed",
},
{
name: "None ignored - Job should reload",
ignoredWorkloadTypes: []string{},
resourceType: "Job",
shouldReload: true,
description: "When nothing is ignored, all workload types should be processed",
},
{
name: "None ignored - CronJob should reload",
ignoredWorkloadTypes: []string{},
resourceType: "CronJob",
shouldReload: true,
description: "When nothing is ignored, all workload types should be processed",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Set the ignored workload types
options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes
// Create minimal test config and options
config := Config{
ResourceName: "test-resource",
Annotation: "configmap.reloader.stakater.com/reload",
}
annotations := Map{
"configmap.reloader.stakater.com/reload": "test-config",
}
// Create ReloaderOptions with the ignored workload types
opts := &ReloaderOptions{
WorkloadTypesToIgnore: tt.ignoredWorkloadTypes,
AutoReloadAll: true, // Enable auto-reload to simplify test
ReloaderAutoAnnotation: "reloader.stakater.com/auto",
}
// Call ShouldReload
result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)
// Check the result
if result.ShouldReload != tt.shouldReload {
t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v",
tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload)
}
t.Logf("✓ %s", tt.description)
})
}
}
func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) {
// Save original state
originalWorkloadTypes := options.WorkloadTypesToIgnore
defer func() {
options.WorkloadTypesToIgnore = originalWorkloadTypes
}()
// Test with invalid workload type - should still continue processing
options.WorkloadTypesToIgnore = []string{"invalid"}
config := Config{
ResourceName: "test-resource",
Annotation: "configmap.reloader.stakater.com/reload",
}
annotations := Map{
"configmap.reloader.stakater.com/reload": "test-config",
}
opts := &ReloaderOptions{
WorkloadTypesToIgnore: []string{"invalid"},
AutoReloadAll: true, // Enable auto-reload to simplify test
ReloaderAutoAnnotation: "reloader.stakater.com/auto",
}
// Should not panic and should continue with normal processing
result := ShouldReload(config, "Job", annotations, Map{}, opts)
// Since validation failed, it should continue with normal processing (should reload)
if !result.ShouldReload {
t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload)
}
}
// Test that validates the fix for issue #996
func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) {
// Save original state
originalWorkloadTypes := options.WorkloadTypesToIgnore
defer func() {
options.WorkloadTypesToIgnore = originalWorkloadTypes
}()
tests := []struct {
name string
ignoredWorkloadTypes []string
resourceType string
description string
}{
{
name: "Issue #996 - ignoreJobs prevents Job processing",
ignoredWorkloadTypes: []string{"jobs"},
resourceType: "Job",
description: "Job resources are skipped entirely, preventing RBAC permission errors",
},
{
name: "Issue #996 - ignoreCronJobs prevents CronJob processing",
ignoredWorkloadTypes: []string{"cronjobs"},
resourceType: "CronJob",
description: "CronJob resources are skipped entirely, preventing RBAC permission errors",
},
{
name: "Issue #996 - both ignored prevent both types",
ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
resourceType: "Job",
description: "Job resources are skipped entirely when both types are ignored",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Set the ignored workload types
options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes
config := Config{
ResourceName: "test-resource",
Annotation: "configmap.reloader.stakater.com/reload",
}
annotations := Map{
"configmap.reloader.stakater.com/reload": "test-config",
}
opts := &ReloaderOptions{
WorkloadTypesToIgnore: tt.ignoredWorkloadTypes,
AutoReloadAll: true, // Enable auto-reload to simplify test
ReloaderAutoAnnotation: "reloader.stakater.com/auto",
}
// Call ShouldReload
result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)
// Should not reload when workload type is ignored
if result.ShouldReload {
t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v",
tt.resourceType, result.ShouldReload)
}
t.Logf("✓ %s", tt.description)
})
}
}

View File

@@ -1,8 +1,9 @@
package util
package common
import (
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
)
@@ -15,6 +16,7 @@ type Config struct {
TypedAutoAnnotation string
SHAValue string
Type string
Labels map[string]string
}
// GetConfigmapConfig provides utility config for configmap
@@ -25,8 +27,9 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
ResourceAnnotations: configmap.Annotations,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
SHAValue: GetSHAfromConfigmap(configmap),
SHAValue: util.GetSHAfromConfigmap(configmap),
Type: constants.ConfigmapEnvVarPostfix,
Labels: configmap.Labels,
}
}
@@ -38,7 +41,8 @@ func GetSecretConfig(secret *v1.Secret) Config {
ResourceAnnotations: secret.Annotations,
Annotation: options.SecretUpdateOnChangeAnnotation,
TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
SHAValue: util.GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
Labels: secret.Labels,
}
}
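
Beyond delegating the SHA computation to the util package, the constructors now copy the resource's labels into Config. A hedged sketch of the resulting struct for a ConfigMap (the pkg/common import path is an assumption; the field names come from the diff above):

```go
// Sketch only: builds a ConfigMap and inspects the Config returned by the
// updated constructor.
package main

import (
	"fmt"

	"github.com/stakater/Reloader/pkg/common"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "app-config",
			Namespace: "default",
			Labels:    map[string]string{"app": "demo"},
		},
		Data: map[string]string{"key": "value"},
	}

	cfg := common.GetConfigmapConfig(cm)
	fmt.Println(cfg.Labels)   // map[app:demo] - newly carried through
	fmt.Println(cfg.Type)     // constants.ConfigmapEnvVarPostfix
	fmt.Println(cfg.SHAValue) // hash from util.GetSHAfromConfigmap
}
```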

View File

@@ -1,4 +1,4 @@
package util
package common
import "time"

View File

@@ -7,7 +7,7 @@ import (
// ResourceMap are resources from where changes are going to be detected
var ResourceMap = map[string]runtime.Object{
"configMaps": &v1.ConfigMap{},
"configmaps": &v1.ConfigMap{},
"secrets": &v1.Secret{},
"namespaces": &v1.Namespace{},
"namespaces": &v1.Namespace{},
}
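
The only functional change in this last hunk is the key casing. A hypothetical lookup illustrates why it matters when entries are fetched by their lowercase plural resource name (the real call sites are not shown in this diff):

```go
// Hypothetical stand-in for the corrected map above; the lookup loop is for
// illustration only and does not appear in the diff.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

var resourceMap = map[string]runtime.Object{
	"configmaps": &v1.ConfigMap{},
	"secrets":    &v1.Secret{},
	"namespaces": &v1.Namespace{},
}

func main() {
	// With the old "configMaps" key, the first lookup would have failed.
	for _, r := range []string{"configmaps", "secrets", "namespaces"} {
		_, ok := resourceMap[r]
		fmt.Printf("%s registered: %v\n", r, ok)
	}
}
```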