diff --git a/.github/md_config.json b/.github/md_config.json
index 465a309..37715e3 100644
--- a/.github/md_config.json
+++ b/.github/md_config.json
@@ -3,5 +3,6 @@
{
"pattern": "^(?!http).+"
}
- ]
+ ],
+ "retryOn429": true
}
diff --git a/.github/workflows/init-branch-release.yaml b/.github/workflows/init-branch-release.yaml
index 07416c6..01c54dc 100644
--- a/.github/workflows/init-branch-release.yaml
+++ b/.github/workflows/init-branch-release.yaml
@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4.2.2
+ uses: actions/checkout@v5.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -57,7 +57,7 @@ jobs:
git diff
- name: Create pull request
- uses: peter-evans/create-pull-request@v7.0.6
+ uses: peter-evans/create-pull-request@v7.0.8
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
diff --git a/.github/workflows/pull_request-helm.yaml b/.github/workflows/pull_request-helm.yaml
index b3f80eb..0edafae 100644
--- a/.github/workflows/pull_request-helm.yaml
+++ b/.github/workflows/pull_request-helm.yaml
@@ -26,7 +26,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
@@ -55,7 +55,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml
index e661eff..e4b1c6f 100644
--- a/.github/workflows/pull_request.yaml
+++ b/.github/workflows/pull_request.yaml
@@ -25,7 +25,7 @@ env:
jobs:
qa:
- uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.117
+ uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: README.md
@@ -40,7 +40,7 @@ jobs:
name: Build
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
@@ -57,12 +57,17 @@ jobs:
charts: deployments/kubernetes/chart/reloader
- name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
cache: true
+ - name: Create timestamp
+ id: prep
+ run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
+
# Get highest tag and remove any suffixes with '-'
- name: Get Highest tag
id: highest_tag
@@ -75,11 +80,7 @@ jobs:
make install
- name: Run golangci-lint
- uses: golangci/golangci-lint-action@v5
- with:
- version: latest
- only-new-issues: false
- args: --timeout 10m
+ run: make lint
- name: Helm Lint
run: |
@@ -104,6 +105,7 @@ jobs:
kind create cluster
kubectl cluster-info
+
- name: Test
run: make test
@@ -135,7 +137,12 @@ jobs:
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: false
- build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+ build-args: |
+ VERSION=merge-${{ steps.generate_tag.outputs.GIT_TAG }}
+ COMMIT=${{github.event.pull_request.head.sha}}
+ BUILD_DATE=${{ steps.prep.outputs.created }}
+ BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
diff --git a/.github/workflows/pull_request_docs.yaml b/.github/workflows/pull_request_docs.yaml
index c3bb23f..dd416bd 100644
--- a/.github/workflows/pull_request_docs.yaml
+++ b/.github/workflows/pull_request_docs.yaml
@@ -12,11 +12,22 @@ on:
- 'docs/**'
- 'theme_common'
- 'theme_override'
+ - 'deployments/kubernetes/chart/reloader/README.md'
jobs:
qa:
- uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.117
+ uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: docs
MD_LINT_CONFIG: .markdownlint.yaml
+ build:
+ uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.163
+ with:
+ DOCKER_FILE_PATH: Dockerfile-docs
+ CONTAINER_REGISTRY_URL: ghcr.io/stakater
+ PUSH_IMAGE: false
+ secrets:
+ CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
+ CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
+ SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
diff --git a/.github/workflows/push-helm-chart.yaml b/.github/workflows/push-helm-chart.yaml
index 7c83632..fc80c05 100644
--- a/.github/workflows/push-helm-chart.yaml
+++ b/.github/workflows/push-helm-chart.yaml
@@ -1,5 +1,7 @@
name: Push Helm Chart
+# TODO: fix: workflows have a problem where only code owners' PRs get the actions running
+
on:
pull_request:
types:
@@ -9,17 +11,19 @@ on:
paths:
- 'deployments/kubernetes/chart/reloader/**'
- '.github/workflows/push-helm-chart.yaml'
+ - '.github/workflows/release-helm-chart.yaml'
env:
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
- REGISTRY: ghcr.io
+ REGISTRY: ghcr.io # container registry
jobs:
verify-and-push-helm-chart:
permissions:
contents: read
- packages: write # to push artifacts to `ghcr.io`
+ id-token: write # needed for signing the images with GitHub OIDC Token
+ packages: write # for pushing and signing container images
name: Verify and Push Helm Chart
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'release/helm-chart')) }}
@@ -27,7 +31,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -68,6 +72,9 @@ jobs:
echo "Helm Chart Version wasnt updated"
exit 1
+ - name: Install Cosign
+ uses: sigstore/cosign-installer@v4.0.0
+
- name: Login to GHCR Registry
uses: docker/login-action@v3
with:
@@ -81,6 +88,9 @@ jobs:
helm push ./packaged-chart/*.tgz oci://ghcr.io/stakater/charts
rm -rf ./packaged-chart
+ - name: Sign artifacts with Cosign
+ run: cosign sign --yes ghcr.io/stakater/charts/reloader:${{ steps.new_chart_version.outputs.result }}
+
- name: Publish Helm chart to gh-pages
uses: stefanprodan/helm-gh-pages@master
with:
@@ -95,6 +105,13 @@ jobs:
commit_username: stakater-user
commit_email: stakater@gmail.com
+ - name: Push new chart tag
+ uses: anothrNick/github-tag-action@1.75.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
+ WITH_V: false
+ CUSTOM_TAG: chart-v${{ steps.new_chart_version.outputs.result }}
+
- name: Notify Slack
uses: 8398a7/action-slack@v3
if: always() # Pick up events even if the job fails or is canceled.
diff --git a/.github/workflows/push-pr-image.yaml b/.github/workflows/push-pr-image.yaml
index ae051b9..eff22f7 100644
--- a/.github/workflows/push-pr-image.yaml
+++ b/.github/workflows/push-pr-image.yaml
@@ -30,13 +30,13 @@ jobs:
if: ${{ github.event.label.name == 'build-and-push-pr-image' }}
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
- name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -47,11 +47,7 @@ jobs:
make install
- name: Run golangci-lint
- uses: golangci/golangci-lint-action@v5
- with:
- version: latest
- only-new-issues: false
- args: --timeout 10m
+ run: make lint
- name: Generate Tags
id: generate_tag
diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml
index 626e506..dda9a1c 100644
--- a/.github/workflows/push.yaml
+++ b/.github/workflows/push.yaml
@@ -29,7 +29,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -42,7 +42,7 @@ jobs:
version: v3.11.3
- name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -53,11 +53,7 @@ jobs:
make install
- name: Run golangci-lint
- uses: golangci/golangci-lint-action@v5
- with:
- version: latest
- only-new-issues: false
- args: --timeout 10m
+ run: make lint
- name: Install kubectl
run: |
@@ -91,6 +87,10 @@ jobs:
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
+
+ - name: Create timestamp
+ id: prep
+ run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Generate image repository path for Docker registry
run: |
@@ -148,7 +148,11 @@ jobs:
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
- build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+ build-args: |
+ VERSION=merge-${{ github.event.number }}
+ COMMIT=${{ github.sha }}
+ BUILD_DATE=${{ steps.prep.outputs.created }}
+ BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
@@ -200,7 +204,6 @@ jobs:
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
- platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
labels: |
@@ -208,7 +211,7 @@ jobs:
org.opencontainers.image.revision=${{ github.sha }}
- name: Push Latest Tag
- uses: anothrNick/github-tag-action@1.71.0
+ uses: anothrNick/github-tag-action@1.75.0
env:
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
WITH_V: false
diff --git a/.github/workflows/release-helm-chart.yaml b/.github/workflows/release-helm-chart.yaml
new file mode 100644
index 0000000..78c7063
--- /dev/null
+++ b/.github/workflows/release-helm-chart.yaml
@@ -0,0 +1,39 @@
+name: Release Helm chart
+
+on:
+ push:
+ tags:
+ - "chart-v*"
+
+permissions:
+ contents: write
+
+jobs:
+ release-helm-chart:
+ name: Release Helm chart
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v5
+ with:
+ fetch-depth: 0
+
+ - name: Create release
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ tag: ${{ github.ref_name }}
+ run: |
+ gh release create "$tag" \
+ --repo="$GITHUB_REPOSITORY" \
+ --title="Helm chart ${tag#chart-}" \
+ --generate-notes
+
+ - name: Notify Slack
+ uses: 8398a7/action-slack@v3
+ if: always()
+ with:
+ status: ${{ job.status }}
+ fields: repo,author,action,eventName,ref,workflow
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 0758597..6bd392f 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -1,8 +1,9 @@
name: Release Go project
on:
- release:
- types: [published]
+ push:
+ tags:
+ - "v*"
env:
DOCKER_FILE_PATH: Dockerfile
@@ -12,7 +13,7 @@ env:
REGISTRY: ghcr.io
jobs:
- build:
+ release:
permissions:
contents: read
@@ -23,7 +24,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
token: ${{ secrets.PUBLISH_TOKEN }}
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -36,7 +37,7 @@ jobs:
version: v3.11.3
- name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
check-latest: true
@@ -47,11 +48,7 @@ jobs:
make install
- name: Run golangci-lint
- uses: golangci/golangci-lint-action@v5
- with:
- version: latest
- only-new-issues: false
- args: --timeout 10m
+ run: make lint
- name: Install kubectl
run: |
@@ -78,6 +75,10 @@ jobs:
id: generate_tag
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
+ - name: Create timestamp
+ id: prep
+ run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -105,6 +106,10 @@ jobs:
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+ build-args: |
+ VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+ COMMIT=${{ github.sha }}
+ BUILD_DATE=${{ steps.prep.outputs.created }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -151,6 +156,10 @@ jobs:
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }},${{ env.GHCR_IMAGE_REPOSITORY }}:latest
+ build-args: |
+ VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+ COMMIT=${{ github.sha }}
+ BUILD_DATE=${{ steps.prep.outputs.created }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -193,22 +202,6 @@ jobs:
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
##############################
- # # Generate tag for operator without "v"
- # - name: Generate Operator Tag
- # id: generate_operator_tag
- # uses: anothrNick/github-tag-action@1.70.0
- # env:
- # GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
- # WITH_V: false
- # DEFAULT_BUMP: patch
- # DRY_RUN: true
-
- # # Update chart tag to the latest semver tag
- # - name: Update Chart Version
- # env:
- # VERSION: ${{ steps.generate_operator_tag.outputs.RELEASE_VERSION }}
- # run: make bump-chart
-
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@master
with:
diff --git a/.github/workflows/reloader-enterprise-published.yml b/.github/workflows/reloader-enterprise-published.yml
index 262ab83..9015c2c 100644
--- a/.github/workflows/reloader-enterprise-published.yml
+++ b/.github/workflows/reloader-enterprise-published.yml
@@ -14,4 +14,4 @@ jobs:
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{ secrets.STAKATER_AB_TOKEN_FOR_RLDR }}" \
https://api.github.com/repos/stakater-ab/reloader-enterprise/dispatches \
- -d '{"event_type":"release-published"}'
+ -d '{"event_type":"release-published","client_payload":{"tag":"${{ github.event.release.tag_name }}"}}'
diff --git a/.github/workflows/reloader-enterprise-unpublished.yml b/.github/workflows/reloader-enterprise-unpublished.yml
index 1212619..e1d6743 100644
--- a/.github/workflows/reloader-enterprise-unpublished.yml
+++ b/.github/workflows/reloader-enterprise-unpublished.yml
@@ -14,4 +14,4 @@ jobs:
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{ secrets.STAKATER_AB_TOKEN_FOR_RLDR }}" \
https://api.github.com/repos/stakater-ab/reloader-enterprise/dispatches \
- -d '{"event_type":"release-unpublished "}'
+ -d '{"event_type":"release-unpublished","client_payload":{"tag":"${{ github.event.release.tag_name }}"}}'
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 8263637..08953b7 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -18,10 +18,7 @@ snapshot:
checksum:
name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt"
changelog:
- sort: asc
- filters:
- exclude:
- - '^docs:'
- - '^test:'
+ # It will be generated manually as part of making a new GitHub release
+ disable: true
env_files:
github_token: /home/jenkins/.apitoken/hub
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
index 77dfb50..c26d891 100644
--- a/.markdownlint.yaml
+++ b/.markdownlint.yaml
@@ -3,4 +3,6 @@
"MD013": false,
"MD024": false,
"MD029": { "style": one },
+ "MD033": false,
+ "MD041": false,
}
diff --git a/.vale.ini b/.vale.ini
index 24bb702..081b307 100644
--- a/.vale.ini
+++ b/.vale.ini
@@ -1,7 +1,7 @@
StylesPath = styles
MinAlertLevel = warning
-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.52/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.87/Stakater.zip
Vocab = Stakater
# Only check MarkDown files
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 219830f..e5649af 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,3 +1,3 @@
# Code of Conduct
-Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/Dockerfile b/Dockerfile
index 19e6dca..8b4715c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,13 +2,17 @@ ARG BUILDER_IMAGE
ARG BASE_IMAGE
# Build the manager binary
-FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.23.1} AS builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.25.3} AS builder
ARG TARGETOS
ARG TARGETARCH
ARG GOPROXY
ARG GOPRIVATE
+ARG COMMIT
+ARG VERSION
+ARG BUILD_DATE
+
WORKDIR /workspace
# Copy the Go Modules manifests
@@ -30,7 +34,10 @@ RUN CGO_ENABLED=0 \
GOPROXY=${GOPROXY} \
GOPRIVATE=${GOPRIVATE} \
GO111MODULE=on \
- go build -mod=mod -a -o manager main.go
+ go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \
+ -X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \
+ -X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE}" \
+ -installsuffix 'static' -mod=mod -a -o manager ./
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
diff --git a/Dockerfile-docs b/Dockerfile-docs
index a126021..feb745c 100644
--- a/Dockerfile-docs
+++ b/Dockerfile-docs
@@ -1,4 +1,4 @@
-FROM python:3.12 as builder
+FROM python:3.14-alpine as builder
# set workdir
RUN mkdir -p $HOME/application
@@ -10,14 +10,14 @@ COPY --chown=1001:root . .
RUN pip3 install -r theme_common/requirements.txt
# Combine Theme Resources
-RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
+RUN python theme_common/scripts/combine_theme_resources.py -s theme_common/resources -ov theme_override/resources -o dist/_theme
# Produce mkdocs file
RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml
# build the docs
RUN mkdocs build
-FROM nginxinc/nginx-unprivileged:1.27-alpine as deploy
+FROM nginxinc/nginx-unprivileged:1.29-alpine as deploy
COPY --from=builder $HOME/application/site/ /usr/share/nginx/html/reloader/
COPY docs-nginx.conf /etc/nginx/conf.d/default.conf
diff --git a/Makefile b/Makefile
index 5668d59..8444e1f 100644
--- a/Makefile
+++ b/Makefile
@@ -41,12 +41,11 @@ YQ ?= $(LOCALBIN)/yq
KUSTOMIZE_VERSION ?= v5.3.0
CONTROLLER_TOOLS_VERSION ?= v0.14.0
ENVTEST_VERSION ?= release-0.17
-GOLANGCI_LINT_VERSION ?= v1.57.2
+GOLANGCI_LINT_VERSION ?= v2.6.1
YQ_VERSION ?= v4.27.5
YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)"
-
.PHONY: yq
yq: $(YQ) ## Download YQ locally if needed
$(YQ):
@@ -58,7 +57,6 @@ $(YQ):
@chmod +x $(YQ)
@echo "yq downloaded successfully to $(YQ)."
-
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
@@ -77,7 +75,7 @@ $(ENVTEST): $(LOCALBIN)
.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
- $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
+ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary (ideally with version)
@@ -104,6 +102,9 @@ run:
build:
"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
+lint: golangci-lint ## Run golangci-lint on the codebase
+ $(GOLANGCI_LINT) run ./...
+
build-image:
docker buildx build \
--platform ${OS}/${ARCH} \
@@ -157,12 +158,6 @@ k8s-manifests: $(KUSTOMIZE) ## Generate k8s manifests using Kustomize from 'mani
update-manifests-version: ## Generate k8s manifests using Kustomize from 'manifests' folder
sed -i 's/image:.*/image: \"ghcr.io\/stakater\/reloader:v$(VERSION)"/g' deployments/kubernetes/manifests/deployment.yaml
-# Bump Chart
-bump-chart:
- sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
- sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
- sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
-
YQ_VERSION = v4.42.1
YQ_BIN = $(shell pwd)/yq
CURRENT_ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
diff --git a/README.md b/README.md
index f6735b6..ae0a00a 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,8 @@
-#  Reloader
+
+
+
+[](https://github.com/sponsors/stakater?utm_source=github&utm_medium=readme&utm_campaign=reloader)
[](https://goreportcard.com/report/github.com/stakater/reloader)
[](https://godoc.org/github.com/stakater/reloader)
[](https://github.com/stakater/reloader/releases/latest)
@@ -7,270 +10,268 @@
[](https://hub.docker.com/r/stakater/reloader/)
[](https://hub.docker.com/r/stakater/reloader/)
[](LICENSE)
-[](https://stakater.com/?utm_source=Reloader&utm_medium=github)
-## Problem
+## 🔄 What is Reloader?
-We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`
+Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets` or `ConfigMaps` are updated.
-## Solution
+In a traditional Kubernetes setup, updating a `Secret` or `ConfigMap` does not automatically restart or redeploy your workloads. This can lead to stale configurations running in production, especially when dealing with dynamic values like credentials, feature flags, or environment configs.
-Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts`.
+Reloader bridges that gap by ensuring your workloads stay in sync with configuration changes, automatically and safely.
-## Enterprise Version
+## 🚀 Why Reloader?
-Reloader is available in two different versions:
+- ✅ **Zero manual restarts**: No need to manually roll out workloads after config/secret changes.
+- 🔒 **Secure by design**: Ensure your apps always use the most up-to-date credentials or tokens.
+- 🛠️ **Flexible**: Works with all major workload types, including Deployment, StatefulSet, Daemonset, ArgoRollout, and more.
+- ⚡ **Fast feedback loop**: Ideal for CI/CD pipelines where secrets/configs change frequently.
+- 🔌 **Out-of-the-box integration**: Just annotate your workloads and let Reloader do the rest.
-1. Open Source Version
-1. Enterprise Version, which includes:
- - SLA (Service Level Agreement) for support and unique requests
- - Slack support
- - Certified images
+## 🧠 How It Works?
-Contact [`sales@stakater.com`](mailto:sales@stakater.com) for info about Reloader Enterprise.
+```mermaid
+flowchart LR
+ ExternalSecret -->|Creates| Secret
+ SealedSecret -->|Creates| Secret
+ Certificate -->|Creates| Secret
+ Secret -->|Watched by| Reloader
+ ConfigMap -->|Watched by| Reloader
-## Compatibility
+ Reloader -->|Triggers Rollout| Deployment
+ Reloader -->|Triggers Rollout| DeploymentConfig
+ Reloader -->|Triggers Rollout| Daemonset
+ Reloader -->|Triggers Rollout| Statefulset
+ Reloader -->|Triggers Rollout| ArgoRollout
+ Reloader -->|Triggers Job| CronJob
+ Reloader -->|Sends Notification| Notifications[Slack, Teams, Webhook]
+```
-Reloader is compatible with Kubernetes >= 1.19
+- Sources like `ExternalSecret`, `SealedSecret`, or `Certificate` from `cert-manager` can create or manage Kubernetes `Secrets`, but Secrets can also be created manually or delivered through GitOps workflows.
+- `Secrets` and `ConfigMaps` are watched by Reloader.
+- When changes are detected, Reloader automatically triggers a rollout of the associated workloads, ensuring your app always runs with the latest configuration.
-## How to use Reloader
+## ⚡ Quick Start
-You have a `Deployment` called `foo` and a `ConfigMap` and/or a `Secret` either mounted as a volume or defined as a environment variable. The `ConfigMap` and `Secret` can be named whatever, but for the sake of this example, lets refer to the `ConfigMap` as `foo-configmap` and the secret as `foo-secret`.
+### 1. Install Reloader
-Add the annotation to the main metadata of your `Deployment`. By default this would be `reloader.stakater.com/auto`.
+Follow any of the [installation options](#-installation) below.
+
+### 2. Annotate Your Workload
+
+To enable automatic reload for a Deployment:
```yaml
+apiVersion: apps/v1
kind: Deployment
metadata:
- name: foo
+ name: my-app
annotations:
reloader.stakater.com/auto: "true"
spec:
template:
metadata:
+ labels:
+ app: my-app
+ spec:
+ containers:
+ - name: app
+ image: your-image
+ envFrom:
+ - configMapRef:
+ name: my-config
+ - secretRef:
+ name: my-secret
```
-This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
+This tells Reloader to watch the `ConfigMap` and `Secret` referenced in this deployment. When either is updated, it will trigger a rollout.
-You can filter it by the type of monitored resource and use typed versions of `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use `configmap.reloader.stakater.com/auto` annotation to look for changes in mounted `ConfigMap`, changes in any of mounted `Secret`s will not trigger a rolling upgrade on related pods.
+## 🏢 Enterprise Version
-You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
-are tagged with a special annotation. To take advantage of that, annotate
-your deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs like this:
+Stakater offers an enterprise-grade version of Reloader with:
+
+1. SLA-backed support
+1. Certified images
+1. Private Slack support
+
+Contact [`sales@stakater.com`](mailto:sales@stakater.com) for info about Reloader Enterprise.
+
+## 🧩 Usage
+
+Reloader supports multiple annotation-based controls to let you **customize when and how your Kubernetes workloads are reloaded** upon changes in `Secrets` or `ConfigMaps`.
+
+Kubernetes does not trigger pod restarts when a referenced `Secret` or `ConfigMap` is updated. Reloader bridges this gap by watching for changes and automatically performing rollouts, but it gives you full control via annotations, so you can:
+
+- Reload **all** resources by default
+- Restrict reloads to only **Secrets** or only **ConfigMaps**
+- Watch only **specific resources**
+- Use **opt-in via tagging** (`search` + `match`)
+- Exclude workloads you don't want to reload
+
+### 1. 🔁 Automatic Reload (Default)
+
+Use these annotations to automatically restart the workload when referenced `Secrets` or `ConfigMaps` change.
+
+| Annotation | Description |
+|--------------------------------------------|----------------------------------------------------------------------|
+| `reloader.stakater.com/auto: "true"` | Reloads workload when any referenced ConfigMap or Secret changes |
+| `secret.reloader.stakater.com/auto: "true"`| Reloads only when referenced Secret(s) change |
+| `configmap.reloader.stakater.com/auto: "true"`| Reloads only when referenced ConfigMap(s) change |
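+
+For instance, a minimal sketch using the typed variant (the workload name is illustrative):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  annotations:
+    # Reload only when a referenced Secret changes; ConfigMap updates are ignored
+    secret.reloader.stakater.com/auto: "true"
+```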
+
+### 2. 📌 Named Resource Reload (Specific Resource Annotations)
+
+These annotations allow you to manually define which ConfigMaps or Secrets should trigger a reload, regardless of whether they're used in the pod spec.
+
+| Annotation | Description |
+|-----------------------------------------------------|--------------------------------------------------------------------------------------|
+| `secret.reloader.stakater.com/reload: "my-secret"` | Reloads when specific Secret(s) change, regardless of how they're used |
+| `configmap.reloader.stakater.com/reload: "my-config"`| Reloads when specific ConfigMap(s) change, regardless of how they're used |
+
+#### Use when
+
+1. ✅ This is useful in tightly scoped scenarios where config is shared but reloads are only relevant in certain cases.
+1. ✅ Use this when you know exactly which resource(s) matter and want to avoid auto-discovery or searching altogether.
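+
+For example, watching specific resources by name (names are illustrative; comma-separated lists are supported):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  annotations:
+    secret.reloader.stakater.com/reload: "my-secret"
+    configmap.reloader.stakater.com/reload: "my-config,another-config"
+```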
+
+### 3. 🎯 Targeted Reload (Match + Search Annotations)
+
+This pattern allows fine-grained reload control; workloads only restart if the Secret/ConfigMap is both:
+
+1. Referenced by the workload
+1. Explicitly annotated with `match: true`
+
+| Annotation | Applies To | Description |
+|-------------------------------------------|--------------|-----------------------------------------------------------------------------|
+| `reloader.stakater.com/search: "true"` | Workload | Enables search mode (only reloads if matching secrets/configMaps are found) |
+| `reloader.stakater.com/match: "true"` | ConfigMap/Secret | Marks the config/secret as eligible for reload in search mode |
+
+#### How it works
+
+1. The workload must have: `reloader.stakater.com/search: "true"`
+1. The ConfigMap or Secret must have: `reloader.stakater.com/match: "true"`
+1. The resource (ConfigMap or Secret) must also be referenced in the workload (via env, `volumeMount`, etc.)
+
+#### Use when
+
+1. ✅ You want to reload a workload only if it references a ConfigMap or Secret that has been explicitly tagged with `reloader.stakater.com/match: "true"`.
+1. ✅ Use this when you want full control over which shared or system-wide resources trigger reloads. Great in multi-tenant clusters or shared configs.
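+
+A minimal sketch of the search/match pair (names are illustrative):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  annotations:
+    reloader.stakater.com/search: "true"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: shared-config
+  annotations:
+    reloader.stakater.com/match: "true"
+```
+
+Remember that `shared-config` must also be referenced by the Deployment (via env or volume mount) for the reload to fire.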
+
+### ❌ Resource-Level Ignore Annotation
+
+When you need to prevent specific ConfigMaps or Secrets from triggering any reloads, use the ignore annotation on the resource itself:
```yaml
-kind: Deployment
+apiVersion: v1
+kind: ConfigMap # or Secret
metadata:
+ name: my-config
annotations:
- reloader.stakater.com/search: "true"
-spec:
- template:
+ reloader.stakater.com/ignore: "true"
```
-and Reloader will trigger the rolling upgrade upon modification of any
-`ConfigMap` or `Secret` annotated like this:
+This instructs Reloader to skip all reload logic for that resource across all workloads.
+
+### 4. ⚙️ Workload-Specific Rollout Strategy
+
+By default, Reloader uses the **rollout** strategy: it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift.
+
+To avoid that, you can switch to the **restart** strategy, which simply restarts the pod without changing the pod template.
```yaml
-kind: ConfigMap
metadata:
annotations:
- reloader.stakater.com/match: "true"
-data:
- key: value
+ reloader.stakater.com/rollout-strategy: "restart"
```
-provided the secret/configmap is being used in an environment variable, or a
-volume mount.
+| Value | Behavior |
+|--------------------|-----------------------------------------------------------------|
+| `rollout` (default) | Updates pod template metadata to trigger a rollout |
+| `restart` | Deletes the pod to restart it without patching the template |
-Please note that `reloader.stakater.com/search` and
-`reloader.stakater.com/auto` do not work together. If you have the
-`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
-will always restart upon a change in configmaps or secrets it uses, regardless
-of whether they have the `reloader.stakater.com/match: "true"` annotation or
-not.
+✅ Use `restart` if:
-Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"` or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` will restart upon a change in a secret it uses.
+1. You're using GitOps and want to avoid drift
+1. You want a quick restart without changing the workload spec
+1. Your platform restricts metadata changes
-We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset`, `rollout`, `cronJob` or `job`.
-To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](.#Configmap) or [Secret](.#Secret).
+### 5. ❗ Annotation Behavior Rules & Compatibility
-It's also possible to enable auto reloading for all resources, by setting the `--auto-reload-all` flag.
-In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their Configmaps or Secrets are updated.
-Notice that setting the auto annotation to an undefined value counts as false as-well.
+- `reloader.stakater.com/auto` and `reloader.stakater.com/search` **cannot be used together**; the `auto` annotation takes precedence.
+- If both `auto` and its typed versions (`secret.reloader.stakater.com/auto`, `configmap.reloader.stakater.com/auto`) are used, **only one needs to be true** to trigger a reload.
+- Setting `reloader.stakater.com/auto: "false"` explicitly disables reload for that workload.
+- If `--auto-reload-all` is enabled on the controller:
+ - All workloads are treated as if they have `auto: "true"` unless they explicitly set it to `"false"`.
+ - Missing or unrecognized annotation values are treated as `"false"`.
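+
+For example, a workload annotated with both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` is still reloaded when a referenced Secret changes, since only one of the two needs to be true.
+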
-### Configmap
+### 6. 🔔 Alerting on Reload
-To perform rolling upgrade when change happens only on specific configmaps use below annotation.
+Reloader can optionally **send alerts** whenever it triggers a rolling upgrade for a workload (e.g., `Deployment`, `StatefulSet`, etc.).
-For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
+These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack, Microsoft Teams or Google Chat.
+
+To enable this feature, update the `reloader.deployment.env.secret` section in your `values.yaml` (when installing via Helm):
```yaml
-kind: Deployment
-metadata:
- annotations:
- configmap.reloader.stakater.com/reload: "foo-configmap"
-spec:
- template:
- metadata:
+reloader:
+ deployment:
+ env:
+ secret:
+ ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
+ ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
+ ALERT_WEBHOOK_URL: "" # Required if ALERT_ON_RELOAD is true
+ ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
```
-Use comma separated list to define multiple configmaps.
+### 7. ⏸️ Pause Deployments
-```yaml
-kind: Deployment
-metadata:
- annotations:
- configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
-spec:
- template:
- metadata:
+This feature allows you to pause rollouts for a deployment for a specified duration, helping to prevent multiple restarts when several ConfigMaps or Secrets are updated in quick succession.
+
+| Annotation | Applies To | Description |
+|---------------------------------------------------------|--------------|-----------------------------------------------------------------------------|
+| `deployment.reloader.stakater.com/pause-period: "5m"` | Deployment | Pauses reloads for the specified period (e.g., `5m`, `1h`) |
+
+#### How it works
+
+1. Add the `deployment.reloader.stakater.com/pause-period` annotation to your Deployment, specifying the pause duration (e.g., `"5m"` for five minutes).
+1. When a watched ConfigMap or Secret changes, Reloader will still trigger a reload event, but if the deployment is paused, the rollout will have no effect until the pause period has elapsed.
+1. This avoids repeated restarts if multiple resources are updated close together.
+
+#### Use when
+
+1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time.
+1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes.
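+
+A minimal example (the duration value is illustrative):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  annotations:
+    reloader.stakater.com/auto: "true"
+    # Hold further rollouts for 5 minutes after a reload is triggered
+    deployment.reloader.stakater.com/pause-period: "5m"
+```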
+
+## 🚀 Installation
+
+Reloader can be installed in multiple ways depending on your Kubernetes setup and preference. Below are the supported methods:
+
+### 1. 📦 Helm
+
+```bash
+helm repo add stakater https://stakater.github.io/stakater-charts
+helm repo update
+helm install reloader stakater/reloader
```
-### Secret
+➡️ See full Helm configuration in the [chart README](./deployments/kubernetes/chart/reloader/README.md).
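+
+To watch a single namespace instead of the whole cluster, `reloader.watchGlobally` can be set at install time, for example:
+
+```bash
+# Install in the `test` namespace and only watch workloads there
+helm install reloader stakater/reloader --namespace test --set reloader.watchGlobally=false
+```
+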
-To perform rolling upgrade when change happens only on specific secrets use below annotation.
+### 2. 📄 Vanilla Manifests
-For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
-
-```yaml
-kind: Deployment
-metadata:
- annotations:
- secret.reloader.stakater.com/reload: "foo-secret"
-spec:
- template:
- metadata:
-```
-
-Use comma separated list to define multiple secrets.
-
-```yaml
-kind: Deployment
-metadata:
- annotations:
- secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
-spec:
- template:
- metadata:
-```
-
-### NOTES
-
-- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
-- For [`rollouts`](https://github.com/argoproj/argo-rollouts/) Reloader simply triggers a change is up to you how you configure the `rollout` strategy.
-- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets/CronJobs/Jobs`
-- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
-- you may override the auto annotation with the `--auto-annotation` flag
-- you may override the secret typed auto annotation with the `--secret-auto-annotation` flag
-- you may override the configmap typed auto annotation with the `--configmap-auto-annotation` flag
-- you may override the search annotation with the `--auto-search-annotation` flag
- and the match annotation with the `--search-match-annotation` flag
-- you may override the configmap annotation with the `--configmap-annotation` flag
-- you may override the secret annotation with the `--secret-annotation` flag
-- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
-- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
-- you may want to watch only a set of secrets/configmaps with certain labels by using the `--resource-label-selector` flag
-- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
-- you can configure logging in JSON format with the `--log-format=json` option
-- you can configure the "reload strategy" with the `--reload-strategy=` option (details below)
-- you can configure rollout reload strategy with `reloader.stakater.com/rollout-strategy` annotation, `restart` or `rollout` values are available (defaults to `rollout`)
-
-## Reload Strategies
-
-Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
-
-- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
-- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its `configMap` or `secret` be deleted and recreated. This strategy can be specified with the `--reload-strategy=annotations` argument.
-
-## Deploying to Kubernetes
-
-You can deploy Reloader by following methods:
-
-### Vanilla Manifests
-
-You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided in manifest with a proper value and apply it by running the command given below:
+Apply raw Kubernetes manifests directly:
```bash
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```
-By default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces. Additionally, in the default Reloader deployment, the following resource limits and requests are set:
+### 3. 🧱 Vanilla Kustomize
-```yaml
-resources:
- limits:
- cpu: 150m
- memory: 512Mi
- requests:
- cpu: 10m
- memory: 128Mi
-```
-
-Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following arguments (`spec.template.spec.containers.args`) to its container:
-
-| Argument | Description |
-|----------------------------------|----------------------|
-| `--resources-to-ignore=configMaps` | To ignore configmaps |
-| `--resources-to-ignore=secrets` | To ignore secrets |
-
-**Note:** At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the Reloader pods to `0`.
-
-Reloader can be configured to only watch secrets/configmaps with one or more labels using the `--resource-label-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`, if no operator is found the 'exists' operator is inferred (i.e. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).
-
-**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key` which checks for key existence.
-
-These selectors can be combined, for example with:
-
-```yaml
---resource-label-selector=reloader=enabled,key-exists,another-label in (value1,value2,value3)
-```
-
-Only configmaps or secrets labeled like the following will be watched:
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
- labels:
- reloader: enabled
- key-exists: yes
- another-label: value1
-```
-
-Reloader can be configured to only watch namespaces labeled with one or more labels using the `--namespace-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`, if no operator is found the 'exists' operator is inferred (i.e. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).
-
-**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key` which checks for key existence.
-
-These selectors can be combined, for example with:
-
-```yaml
---namespace-selector=reloader=enabled,test=true
-```
-
-Only namespaces labeled as below would be watched and eligible for reloads:
-
-```yaml
-kind: Namespace
-apiVersion: v1
-metadata:
- labels:
- reloader: enabled
- test: true
-```
-
-### Vanilla Kustomize
-
-You can also apply the vanilla manifests by running the following command
+Use the built-in Kustomize support:
```bash
kubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes
```
-Similarly to vanilla manifests get deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
+### 4. 🛠️ Custom Kustomize Setup
-### Kustomize
-
-You can write your own `kustomization.yaml` using ours as a 'base' and write patches to tweak the configuration.
+You can create your own `kustomization.yaml` and use Reloader's as a base:
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
@@ -282,126 +283,110 @@ resources:
namespace: reloader
```
-### Helm Charts
+### 5. ⚙️ Default Resource Requests and Limits
-Alternatively if you have configured helm on your cluster, you can add Reloader to helm from our public chart repository and deploy it via helm using below-mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating Reloader from Helm2 to Helm3.
+By default, Reloader is deployed with the following resource requests and limits:
-#### Installation
-
-```bash
-helm repo add stakater https://stakater.github.io/stakater-charts
-
-helm repo update
-
-helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
-
-helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches in all namespaces. To watch in single namespace, set watchGlobally=false
-
-helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
+```yaml
+resources:
+ limits:
+ cpu: 150m
+ memory: 512Mi
+ requests:
+ cpu: 10m
+ memory: 128Mi
```
-#### Uninstalling
+### 6. ⚙️ Optional Runtime Configurations
+
+These flags let you customize Reloader's behavior globally, at the Reloader controller level.
+
+#### 1. 🔄 Reload Behavior
+
+| Flag | Description |
+|------|-------------|
+| `--reload-on-create=true` | Reload workloads when a watched ConfigMap or Secret is created |
+| `--reload-on-delete=true` | Reload workloads when a watched ConfigMap or Secret is deleted |
+| `--auto-reload-all=true` | Automatically reload all workloads unless opted out (`auto: "false"`) |
+| `--reload-strategy=env-vars` | Strategy to use for triggering reload (`env-vars` or `annotations`) |
+| `--log-format=json` | Enable JSON-formatted logs for better machine readability |
+
+##### Reload Strategies
+
+Reloader supports multiple strategies for triggering rolling updates when a watched `ConfigMap` or `Secret` changes. You can configure the strategy using the `--reload-strategy` flag.
+
+| Strategy | Description |
+|--------------|-------------|
+| `env-vars` (default) | Adds a dummy environment variable to every container of the owning workload (e.g., `Deployment`, `StatefulSet`) that references the changed resource. This forces Kubernetes to perform a rolling update. |
+| `annotations` | Adds a `reloader.stakater.com/last-reloaded-from` annotation to the pod template metadata. Ideal for GitOps tools like ArgoCD, as it avoids triggering unwanted sync diffs. |
+
+- The `env-vars` strategy is the default and works in most setups.
+- The `annotations` strategy is preferred in **GitOps environments** to prevent config drift in tools like ArgoCD or Flux.
+- In `annotations` mode, a `ConfigMap` or `Secret` that is deleted and re-created will still trigger a reload (since previous state is not tracked).
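+
+As an illustration, these flags are passed as container arguments (`spec.template.spec.containers[].args`) on the Reloader deployment; the combination below is just an example:
+
+```yaml
+containers:
+  - name: reloader
+    image: ghcr.io/stakater/reloader
+    args:
+      - "--reload-strategy=annotations" # GitOps-friendly strategy
+      - "--auto-reload-all=true"        # opt every workload in unless it sets auto: "false"
+      - "--log-format=json"             # machine-readable logs
+```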
+
+#### 2. 🚫 Resource Filtering
+
+| Flag | Description |
+|------|-------------|
+| `--resources-to-ignore=configmaps` | Ignore ConfigMaps (only one type can be ignored at a time) |
+| `--resources-to-ignore=secrets` | Ignore Secrets (cannot be combined with ignoring ConfigMaps) |
+| `--ignored-workload-types=jobs,cronjobs` | Ignore specific workload types from reload monitoring |
+| `--resource-label-selector=key=value` | Only watch ConfigMaps/Secrets with matching labels |
+
+> **⚠️ Note:**
+>
+> Only **one** resource type can be ignored at a time.
+> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
+> ✅ **Workaround:** Scale the Reloader deployment to `0` replicas if you want to disable it completely.
+
+**💡 Workload Type Examples:**
```bash
-helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
+# Ignore only Jobs
+--ignored-workload-types=jobs
+
+# Ignore only CronJobs
+--ignored-workload-types=cronjobs
+
+# Ignore both (comma-separated)
+--ignored-workload-types=jobs,cronjobs
```
-### Parameters
+> **🔧 Use Case:** Ignoring workload types is useful when you don't want certain types of workloads to be automatically reloaded.
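+
+As a sketch of label-based filtering, with `--resource-label-selector=reloader=enabled` only ConfigMaps or Secrets labeled like this are watched:
+
+```yaml
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: my-config # illustrative name
+  labels:
+    reloader: enabled
+```
+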
-#### Global Parameters
+#### 3. 🧩 Namespace Filtering
-| Parameter | Description | Type | Default |
-|---------------------------|-----------------------------------------------------------------|-------|---------|
-| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | array | `[]` |
+| Flag | Description |
+|------|-------------|
+| `--namespace-selector='key=value'`<br>`--namespace-selector='key1=value1,key2=value2'`<br>`--namespace-selector='key in (value1,value2)'` | Watch only namespaces with matching labels. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label selectors |
+| `--namespaces-to-ignore=ns1,ns2` | Skip specific namespaces from being watched |
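+
+For example, with `--namespace-selector=reloader=enabled` only namespaces labeled like this are watched and eligible for reloads:
+
+```yaml
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: my-namespace # illustrative name
+  labels:
+    reloader: enabled
+```
+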
-#### Common Parameters
+#### 4. 🔑 Annotation Key Overrides
-| Parameter | Description | Type | Default |
-|--------------------|-------------------------------|--------|---------|
-| `nameOverride` | replace the name of the chart | string | `""` |
-| `fullnameOverride` | replace the generated name | string | `""` |
+These flags allow you to redefine annotation keys used in your workloads or resources:
-#### Core Reloader Parameters
+| Flag | Overrides |
+|------|-----------|
+| `--auto-annotation` | Overrides `reloader.stakater.com/auto` |
+| `--secret-auto-annotation` | Overrides `secret.reloader.stakater.com/auto` |
+| `--configmap-auto-annotation` | Overrides `configmap.reloader.stakater.com/auto` |
+| `--auto-search-annotation` | Overrides `reloader.stakater.com/search` |
+| `--search-match-annotation` | Overrides `reloader.stakater.com/match` |
+| `--secret-annotation` | Overrides `secret.reloader.stakater.com/reload` |
+| `--configmap-annotation` | Overrides `configmap.reloader.stakater.com/reload` |
+| `--pause-deployment-annotation` | Overrides `deployment.reloader.stakater.com/pause-period` |
+| `--pause-deployment-time-annotation` | Overrides `deployment.reloader.stakater.com/paused-at` |
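+
+A sketch of overriding one of these keys (the custom key `mycompany.com/auto` is purely illustrative):
+
+```yaml
+# Container args on the Reloader deployment
+args:
+  - "--auto-annotation=mycompany.com/auto"
+```
+
+Workloads would then opt in with `mycompany.com/auto: "true"` instead of `reloader.stakater.com/auto: "true"`.
+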
-| Parameter | Description | Type | Default |
-|-----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|-------------|-----------|
-| `reloader.autoReloadAll` | | boolean | `false` |
-| `reloader.isArgoRollouts` | Enable Argo `Rollouts`. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.ignoreSecrets` | To ignore secrets. Valid value are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
-| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.reloadOnCreate` | Enable reload on create events. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.reloadOnDelete` | Enable reload on delete events. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid value are either `true` or `false` | boolean | `false` |
-| `reloader.reloadStrategy` | Strategy to trigger resource restart, set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
-| `reloader.ignoreNamespaces` | List of comma separated namespaces to ignore, if multiple are provided, they are combined with the AND operator | string | `""` |
-| `reloader.namespaceSelector` | List of comma separated namespaces to select, if multiple are provided, they are combined with the AND operator | string | `""` |
-| `reloader.resourceLabelSelector` | List of comma separated label selectors, if multiple are provided they are combined with the AND operator | string | `""` |
-| `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
-| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
-| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
-| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
-| `reloader.legacy.rbac` | | boolean | `false` |
-| `reloader.matchLabels` | Pod labels to match | map | `{}` |
-| `reloader.enableMetricsByNamespace` | Expose an additional Prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces) | boolean | `false` |
+#### 5. 🏷️ Debugging
-#### Deployment Reloader Parameters
+| Flag | Description |
+|------|-------------|
+| `--enable-pprof` | Enables `pprof` for profiling |
+| `--pprof-addr` | Address to start `pprof` server on. Default is `:6060` |
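+
+Assuming the standard Go `net/http/pprof` endpoints are served on the configured address, a profile could be collected with something like:
+
+```bash
+# Deployment name depends on your install; port-forward the Reloader pod first
+kubectl port-forward deploy/reloader 6060:6060 &
+# Fetch a 30-second CPU profile (paths assume the stock pprof handler)
+go tool pprof "http://localhost:6060/debug/pprof/profile?seconds=30"
+```
+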
-| Parameter | Description | Type | Default |
-|-------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-------------------|
-| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true`. The replicas will be limited to 1 when `reloader.enableHA = false` | int | 1 |
-| `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
-| `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
-| `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
-| `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
-| `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
-| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
-| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
-| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
-| `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
-| `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
-| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
-| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
-| `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
-| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
-| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
-| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
+## Compatibility
-#### Other Reloader Parameters
-
-| Parameter | Description | Type | Default |
-|----------------------------------------|-----------------------------------------------------------------|---------|---------|
-| `reloader.service` | | map | `{}` |
-| `reloader.rbac.enabled` | Specifies whether a role based access control should be created | boolean | `true` |
-| `reloader.serviceAccount.create` | Specifies whether a ServiceAccount should be created | boolean | `true` |
-| `reloader.custom_annotations` | Add custom annotations | map | `{}` |
-| `reloader.serviceMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics (legacy) | boolean | `false` |
-| `reloader.podMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics | boolean | `false` |
-| `reloader.podDisruptionBudget.enabled` | Limit the number of pods of a replicated application | boolean | `false` |
-| `reloader.netpol.enabled` | | boolean | `false` |
-| `reloader.volumeMounts` | Mount volume | array | `[]` |
-| `reloader.volumes` | Add volume to a pod | array | `[]` |
-| `reloader.webhookUrl` | Add webhook to Reloader | string | `""` |
-
-#### Additional Remarks
-
-- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configmap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
-- At one time only one of the resources `ignoreConfigMaps` or `ignoreSecrets` can be ignored, trying to do both will cause error in helm template compilation
-- Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions
-- `isOpenShift` Recent versions of OpenShift (tested on 4.13.3) require the specified user to be in an `uid` range which is dynamically assigned by the namespace. The solution is to unset the runAsUser variable via ``deployment.securityContext.runAsUser=null`` and let OpenShift assign it at install
-- `reloadOnCreate` controls how Reloader handles secrets being added to the cache for the first time. If `reloadOnCreate` is set to true:
- 1. Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload
- 1. When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload
- 1. If you are running Reloader in HA mode all workloads will have a rolling update performed when a new leader is elected
-- `reloadOnDelete` controls how Reloader handles secrets being deleted. If `reloadOnDelete` is set to true:
- 1. Configmaps/secrets being deleted will cause Reloader to perform a rolling update of the associated workload
-- `serviceMonitor` will be removed in future releases of Reloader in favour of Pod monitor
-- If `reloadOnCreate` is set to false:
- 1. Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs
- 1. In the worst case the window in which there can be no leader is 15s as this is the LeaseDuration
-- If `reloadOnDelete` is set to false:
- 1. Deleting of configmaps/secrets has no effect to pods that references these resources.
-- By default, `reloadOnCreate`, `reloadOnDelete` and `syncAfterRestart` are all set to false. All need to be enabled explicitly
+Reloader is compatible with Kubernetes >= 1.19
## Help
@@ -449,22 +434,17 @@ _Repository GitHub releases_: As requested by the community in [issue 685](https
To make a GitHub release:
-1. Code owners create a release branch `release-vX.Y.Z`
-1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
+1. Code owners create a release branch `release-vX.Y.Z` from `master`
+1. Code owners run the [Init Release](https://github.com/stakater/Reloader/actions/workflows/init-branch-release.yaml) workflow to automatically generate the version and manifests on the release branch (a sketch of an equivalent CLI invocation follows this list)
+ - Set the `TARGET_BRANCH` parameter to the release branch, e.g. `release-vX.Y.Z`
+ - Set `TARGET_VERSION` to the release version without the `v` prefix, e.g. `X.Y.Z`
1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
+1. Code owners create another branch from `master` and bump the Helm chart version as well as the Reloader image version
+ - Code owners create a PR with `release/helm-chart` label, example: [PR-846](https://github.com/stakater/Reloader/pull/846)
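+
+Assuming the GitHub CLI is available, the workflow dispatch in step 2 could look like this sketch (the `X.Y.Z` placeholders are kept deliberately):
+
+```bash
+gh workflow run init-branch-release.yaml \
+  --repo stakater/Reloader \
+  -f TARGET_BRANCH=release-vX.Y.Z \
+  -f TARGET_VERSION=X.Y.Z
+```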
_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
-_Helm chart versioning_: The Reloader Helm chart is maintained in [this repository](./deployments/kubernetes/chart/reloader). The Helm chart has its own semantic versioning. Helm charts and code releases are separate artifacts and separately versioned. Manifest making strategy relies on Kustomize. The Reloader Helm chart manages the two artifacts with these two fields:
-
-- [`appVersion`](./deployments/kubernetes/chart/reloader/Chart.yaml) points to released Reloader application image version listed on the [releases page](https://github.com/stakater/Reloader/releases)
-- [`version`](./deployments/kubernetes/chart/reloader/Chart.yaml) sets the Reloader Helm chart version
-
-Helm chart will be released to the chart registry whenever files in `deployments/kubernetes/chart/reloader/**` change on the main branch.
-
-Helm Chart will be released by the maintainers, on labelling a PR with `release/helm-chart` and pre-maturely updating the `version` field in `Chart.yaml` file.
-
## Changelog
View the [releases page](https://github.com/stakater/Reloader/releases) to see what has changed in each release.
@@ -473,15 +453,12 @@ View the [releases page](https://github.com/stakater/Reloader/releases) to see w
Apache2 Β© [Stakater][website]
-## About
+## About Stakater
-`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at
+[](https://stakater.com/?utm_source=Reloader&utm_medium=github)
-See [our other projects](https://github.com/stakater)
-or contact us in case of professional services and queries on
+`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at [hello@stakater.com](mailto:hello@stakater.com)
+
+See [our other projects](https://github.com/stakater) or contact us for professional services and queries at [hello@stakater.com](mailto:hello@stakater.com)
[website]: https://stakater.com
-
-## Acknowledgements
-
-- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented [here](docs/Reloader-vs-ConfigmapController.md) why we re-created Reloader
diff --git a/VERSION b/VERSION
index 1cc5f65..ac9f79c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.1.0
\ No newline at end of file
+1.4.10
diff --git a/assets/web/reloader-round-100px.png b/assets/web/reloader-round-100px.png
deleted file mode 100644
index 9e84c1d..0000000
Binary files a/assets/web/reloader-round-100px.png and /dev/null differ
diff --git a/assets/web/reloader.jpg b/assets/web/reloader.jpg
new file mode 100644
index 0000000..d510a68
Binary files /dev/null and b/assets/web/reloader.jpg differ
diff --git a/deployments/kubernetes/chart/reloader/Chart.yaml b/deployments/kubernetes/chart/reloader/Chart.yaml
index 85663f2..3ed7c9b 100644
--- a/deployments/kubernetes/chart/reloader/Chart.yaml
+++ b/deployments/kubernetes/chart/reloader/Chart.yaml
@@ -1,10 +1,8 @@
-# Generated from deployments/kubernetes/templates/chart/Chart.yaml.tmpl
-
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
-version: 1.2.2
-appVersion: v1.2.1
+version: 2.2.5
+appVersion: v1.4.10
keywords:
- Reloader
- kubernetes
@@ -18,4 +16,4 @@ maintainers:
- name: rasheedamir
email: rasheed@stakater.com
- name: faizanahmad055
- email: faizan.ahmad55@outlook.com
+ email: faizan@stakater.com
diff --git a/deployments/kubernetes/chart/reloader/README.md b/deployments/kubernetes/chart/reloader/README.md
new file mode 100644
index 0000000..b3ba973
--- /dev/null
+++ b/deployments/kubernetes/chart/reloader/README.md
@@ -0,0 +1,179 @@
+# Reloader Helm Chart
+
+If you have configured Helm on your cluster, you can add Reloader from our public chart repository and deploy it via Helm using the commands below. If you have trouble migrating Reloader from Helm2 to Helm3, follow the [Helm2 to Helm3 guide](../../../../docs/Helm2-to-Helm3.md).
+
+## Installation
+
+```bash
+# Add the Stakater Helm repository
+helm repo add stakater https://stakater.github.io/stakater-charts
+
+helm repo update
+
+helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
+
+helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches all namespaces; to watch a single namespace, set watchGlobally=false
+
+helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in the `test` namespace, watching only `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts` in that namespace
+
+helm install stakater/reloader --set reloader.ignoreJobs=true --set reloader.ignoreCronJobs=true --generate-name # Install Reloader ignoring Jobs and CronJobs from reload monitoring
+```
+
+## Uninstalling
+
+```bash
+helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
+```
+
+## Parameters
+
+### Global Parameters
+
+| Parameter | Description | Type | Default |
+| ------------------------- | --------------------------------------------------------------- | ----- | ------- |
+| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | array | `[]` |
+
+### Common Parameters
+
+| Parameter | Description | Type | Default |
+| ------------------ | ---------------------------------------- | ------ | ----------------- |
+| `nameOverride` | replace the name of the chart | string | `""` |
+| `fullnameOverride` | replace the generated name | string | `""` |
+| `image` | Set container image name, tag and policy | map | `see values.yaml` |
+
+### Core Reloader Parameters
+
+| Parameter | Description | Type | Default |
+| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | --------- |
+| `reloader.autoReloadAll`            | Enable auto reload for all workloads, without requiring the reload annotations                                                                        | boolean     | `false`   |
+| `reloader.isArgoRollouts`           | Enable Argo `Rollouts`. Valid values are either `true` or `false`                                                                                     | boolean     | `false`   |
+| `reloader.isOpenshift`              | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false`                                                                         | boolean     | `false`   |
+| `reloader.ignoreSecrets`            | To ignore secrets. Valid values are either `true` or `false`. Only one of `ignoreSecrets` or `ignoreConfigMaps` can be set, not both at the same time | boolean     | `false`   |
+| `reloader.ignoreConfigMaps`         | To ignore configmaps. Valid values are either `true` or `false`                                                                                       | boolean     | `false`   |
+| `reloader.ignoreJobs`               | To ignore Jobs from reload monitoring. Valid values are either `true` or `false`. Translates to `--ignored-workload-types=jobs`                       | boolean     | `false`   |
+| `reloader.ignoreCronJobs`           | To ignore CronJobs from reload monitoring. Valid values are either `true` or `false`. Translates to `--ignored-workload-types=cronjobs`               | boolean     | `false`   |
+| `reloader.reloadOnCreate`           | Enable reload on create events. Valid values are either `true` or `false`                                                                             | boolean     | `false`   |
+| `reloader.reloadOnDelete`           | Enable reload on delete events. Valid values are either `true` or `false`                                                                             | boolean     | `false`   |
+| `reloader.syncAfterRestart`         | Enable sync after Reloader restarts for **Add** events; works only when `reloadOnCreate` is `true`. Valid values are either `true` or `false`         | boolean     | `false`   |
+| `reloader.reloadStrategy`           | Strategy to trigger resource restart; set to either `default`, `env-vars` or `annotations`                                                            | enumeration | `default` |
+| `reloader.ignoreNamespaces`         | Comma-separated list of namespaces to ignore; if multiple are provided, they are combined with the AND operator                                       | string      | `""`      |
+| `reloader.namespaceSelector`        | Comma-separated list of k8s label selectors for namespace selection. The parameter is only used when `reloader.watchGlobally` is `true`. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label selectors | string | `""` |
+| `reloader.resourceLabelSelector`    | Comma-separated list of label selectors; if multiple are provided, they are combined with the AND operator                                            | string      | `""`      |
+| `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
+| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
+| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
+| `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
+| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
+| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
+| `reloader.legacy.rbac` | | boolean | `false` |
+| `reloader.matchLabels` | Pod labels to match | map | `{}` |
+| `reloader.enableMetricsByNamespace` | Expose an additional Prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces) | boolean | `false` |
+
+### Deployment Reloader Parameters
+
+| Parameter | Description | Type | Default |
+| ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | ----------------- |
+| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true`. The replicas will be limited to 1 when `reloader.enableHA = false` | int | 1 |
+| `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
+| `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
+| `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
+| `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
+| `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
+| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
+| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
+| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
+| `reloader.deployment.labels` | Set deployment labels, default to Stakater settings | array | `see values.yaml` |
+| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
+| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
+| `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
+| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
+| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
+| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
+| `reloader.deployment.volumeMounts` | Mount volume | array | `[]` |
+| `reloader.deployment.volumes`                   | Add volume to a pod                                                                                                                                           | array  | `[]`              |
+| `reloader.deployment.dnsConfig`                 | DNS configuration for pods                                                                                                                                    | map    | `{}`              |
+
+### Other Reloader Parameters
+
+| Parameter | Description | Type | Default |
+| -------------------------------------- | --------------------------------------------------------------- | ------- | ------- |
+| `reloader.service`                     | Configure the Reloader service (e.g. port, labels, annotations)  | map     | `{}`    |
+| `reloader.rbac.enabled` | Specifies whether a role based access control should be created | boolean | `true` |
+| `reloader.serviceAccount.create` | Specifies whether a ServiceAccount should be created | boolean | `true` |
+| `reloader.custom_annotations` | Add custom annotations | map | `{}` |
+| `reloader.serviceMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics (legacy) | boolean | `false` |
+| `reloader.podMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics | boolean | `false` |
+| `reloader.podDisruptionBudget.enabled` | Limit the number of pods of a replicated application | boolean | `false` |
+| `reloader.netpol.enabled`              | Enable a `NetworkPolicy` for Reloader                            | boolean | `false` |
+| `reloader.volumeMounts` | Mount volume | array | `[]` |
+| `reloader.volumes` | Add volume to a pod | array | `[]` |
+| `reloader.webhookUrl` | Add webhook to Reloader | string | `""` |
+
+## ⚙️ Helm Chart Configuration Notes
+
+### Selector Behavior
+- Both `namespaceSelector` & `resourceLabelSelector` can be used together
+- **Both conditions must be met** for a ConfigMap/Secret to trigger reloads
+ - Example: If a ConfigMap matches `resourceLabelSelector` but not `namespaceSelector`, it will be ignored
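+
+For illustration, a hedged sketch of a values file combining both selectors (the label keys and values are placeholders, not defaults):
+
+```bash
+cat > selector-values.yaml <<'EOF'
+reloader:
+  namespaceSelector: "environment=prod"
+  resourceLabelSelector: "reload=enabled"
+EOF
+helm upgrade --install reloader stakater/reloader -f selector-values.yaml
+```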
+
+### Important Limitations
+- Only one of these resources can be ignored at a time:
+ - `ignoreConfigMaps` **or** `ignoreSecrets`
+ - Trying to ignore both will cause Helm template compilation errors
+- The `ignoreJobs` and `ignoreCronJobs` flags can be used together or individually
+ - When both are enabled, translates to `--ignored-workload-types=jobs,cronjobs`
+ - When used individually, translates to `--ignored-workload-types=jobs` or `--ignored-workload-types=cronjobs`
+ - These flags prevent Reloader from monitoring and reloading the specified workload types
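+
+A hedged local check of the rendered flag (the release name is a placeholder):
+
+```bash
+helm template reloader stakater/reloader \
+  --set reloader.ignoreJobs=true \
+  --set reloader.ignoreCronJobs=true \
+  | grep -- "--ignored-workload-types"
+# Expect: --ignored-workload-types=jobs,cronjobs
+```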
+
+### Special Integrations
+- OpenShift (`DeploymentConfig`) and Argo Rollouts support must be **explicitly enabled**
+ - Required due to potential permission restrictions on clusters
+
+### OpenShift Considerations
+- Recent OpenShift versions (tested on 4.13.3) require:
+ - Users to be in a dynamically assigned UID range
+ - **Solution**: Unset `runAsUser` via `reloader.deployment.securityContext.runAsUser=null`
+ - Let OpenShift assign UID automatically during installation
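+
+A minimal sketch, assuming chart defaults otherwise (the release name is a placeholder):
+
+```bash
+helm upgrade --install reloader stakater/reloader \
+  --set reloader.isOpenshift=true \
+  --set reloader.deployment.securityContext.runAsUser=null
+```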
+
+### Core Functionality Flags
+
+#### 🔄 `reloadOnCreate` Behavior
+
+**When true:**
+
+- ✅ New ConfigMaps/Secrets trigger rolling updates
+- ✅ New deployments referencing existing resources reload
+- ✅ In HA mode, a new leader reloads all tracked workloads
+
+**When false:**
+
+- ❌ Updates during leader downtime are missed
+- ⏳ Potential 15s delay window (default `LeaseDuration`)
+
+#### 🗑️ `reloadOnDelete` Behavior
+
+**When true:**
+
+- ✅ Deleted resources trigger rolling updates of referencing workloads
+
+**When false:**
+
+- ❌ Deletions have no effect on referencing pods
+
+#### Default Settings
+⚠️ All flags default to `false` (must be enabled explicitly):
+- `reloadOnCreate`
+- `reloadOnDelete`
+- `syncAfterRestart`
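+
+A minimal sketch enabling all three explicitly (release name and namespace are placeholders):
+
+```bash
+helm upgrade --install reloader stakater/reloader -n reloader \
+  --set reloader.reloadOnCreate=true \
+  --set reloader.reloadOnDelete=true \
+  --set reloader.syncAfterRestart=true
+```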
+
+### Deprecation Notice
+- `serviceMonitor` will be removed in future releases in favor of `PodMonitor`
+
+## Release Process
+
+_Helm chart versioning_: The Reloader Helm chart is maintained in this repository. The Helm chart has its own semantic versioning; Helm charts and code releases are separate artifacts and are versioned separately. The manifest-making strategy relies on Kustomize. The Reloader Helm chart manages the two artifacts with these two fields:
+
+- [`appVersion`](Chart.yaml) points to released Reloader application image version listed on the [releases page](https://github.com/stakater/Reloader/releases)
+- [`version`](Chart.yaml) sets the Reloader Helm chart version
+
+The Helm chart is released to the chart registry whenever files in `deployments/kubernetes/chart/reloader/**` change on the main branch.
+
+### To release the Helm chart
+
+1. Create a new branch and update the Helm chart `appVersion` and `version`, example pull-request: [PR-846](https://github.com/stakater/Reloader/pull/846)
+1. Label the PR with `release/helm-chart`
+1. After approval and just before squash, make sure the squash commit message represents all changes, because it will be used to autogenerate the changelog message
diff --git a/deployments/kubernetes/chart/reloader/templates/_helpers.tpl b/deployments/kubernetes/chart/reloader/templates/_helpers.tpl
index 04b3ee4..8987fb5 100644
--- a/deployments/kubernetes/chart/reloader/templates/_helpers.tpl
+++ b/deployments/kubernetes/chart/reloader/templates/_helpers.tpl
@@ -20,12 +20,27 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}
{{- end -}}
-{{- define "reloader-labels.chart" -}}
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "reloader-chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{- define "reloader-match-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
-chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
+{{- end -}}
+
+{{- define "reloader-labels.chart" -}}
+{{ include "reloader-match-labels.chart" . }}
+app.kubernetes.io/name: {{ template "reloader-name" . }}
+app.kubernetes.io/instance: {{ .Release.Name | quote }}
+helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end -}}
{{/*
@@ -38,10 +53,10 @@ podAntiAffinity:
podAffinityTerm:
labelSelector:
matchExpressions:
- - key: app
+ - key: app.kubernetes.io/instance
operator: In
values:
- - {{ template "reloader-fullname" . }}
+ - {{ .Release.Name | quote }}
topologyKey: "kubernetes.io/hostname"
{{- end -}}
@@ -63,3 +78,12 @@ Create the annotations to support helm3
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
+
+{{/*
+Create the namespace selector if it does not watch globally
+*/}}
+{{- define "reloader-namespaceSelector" -}}
+{{- if and .Values.reloader.watchGlobally .Values.reloader.namespaceSelector -}}
+ {{ .Values.reloader.namespaceSelector }}
+{{- end -}}
+{{- end -}}
diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml
index 5b2ad54..9f655aa 100644
--- a/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/clusterrole.yaml
@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
rules:
@@ -31,7 +31,7 @@ rules:
- list
- get
- watch
-{{- if .Values.reloader.namespaceSelector }}
+{{- if (include "reloader-namespaceSelector" .) }}
- apiGroups:
- ""
resources:
@@ -76,6 +76,7 @@ rules:
- get
- update
- patch
+{{- if .Values.reloader.ignoreCronJobs }}{{- else }}
- apiGroups:
- "batch"
resources:
@@ -83,6 +84,8 @@ rules:
verbs:
- list
- get
+{{- end }}
+{{- if .Values.reloader.ignoreJobs }}{{- else }}
- apiGroups:
- "batch"
resources:
@@ -92,6 +95,7 @@ rules:
- delete
- list
- get
+{{- end}}
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
diff --git a/deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml b/deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml
index 0730dba..137b5a8 100644
--- a/deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml
@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
roleRef:
diff --git a/deployments/kubernetes/chart/reloader/templates/deployment.yaml b/deployments/kubernetes/chart/reloader/templates/deployment.yaml
index c801aa8..069c20c 100644
--- a/deployments/kubernetes/chart/reloader/templates/deployment.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/deployment.yaml
@@ -4,15 +4,15 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
-{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -25,24 +25,23 @@ spec:
revisionHistoryLimit: {{ .Values.reloader.deployment.revisionHistoryLimit }}
selector:
matchLabels:
- app: {{ template "reloader-fullname" . }}
- release: {{ .Release.Name | quote }}
+{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 6 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}
template:
metadata:
{{- if .Values.reloader.deployment.pod.annotations }}
annotations:
-{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
+{{ tpl (toYaml .Values.reloader.deployment.pod.annotations) . | indent 8 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 8 }}
{{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 8 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 8 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 8 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 8 }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
@@ -72,17 +71,21 @@ spec:
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
+ {{- with .Values.reloader.deployment.dnsConfig }}
+ dnsConfig:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
containers:
{{- if .Values.global.imageRegistry }}
- - image: "{{ .Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
+ - image: "{{ .Values.global.imageRegistry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
{{- else }}
- {{- if .Values.reloader.deployment.image.digest }}
- - image: "{{ .Values.reloader.deployment.image.name }}@{{ .Values.reloader.deployment.image.digest }}"
+ {{- if .Values.image.digest }}
+ - image: "{{ .Values.image.repository }}@{{ .Values.image.digest }}"
{{- else }}
- - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
+ - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
{{- end }}
{{- end }}
- imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
env:
- name: GOMAXPROCS
@@ -144,6 +147,15 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
+
+ - name: RELOADER_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ - name: RELOADER_DEPLOYMENT_NAME
+ value: {{ template "reloader-fullname" . }}
+
{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
@@ -198,7 +210,7 @@ spec:
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
- {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
+ {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -212,15 +224,28 @@ spec:
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
+ {{- if and (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) }}
+ - "--ignored-workload-types=jobs,cronjobs"
+ {{- else if .Values.reloader.ignoreJobs }}
+ - "--ignored-workload-types=jobs"
+ {{- else if .Values.reloader.ignoreCronJobs }}
+ - "--ignored-workload-types=cronjobs"
+ {{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
- {{- if .Values.reloader.namespaceSelector }}
- - "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
+ {{- if (include "reloader-namespaceSelector" .) }}
+ - "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\""
{{- end }}
{{- if .Values.reloader.resourceLabelSelector }}
- "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
{{- end }}
+ {{- if .Values.reloader.enablePProf }}
+ - "--enable-pprof"
+ {{- if and .Values.reloader.pprofAddr }}
+ - "--pprof-addr={{ .Values.reloader.pprofAddr }}"
+ {{- end }}
+ {{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
@@ -249,6 +274,14 @@ spec:
{{- if .Values.reloader.custom_annotations.match }}
- "--search-match-annotation"
- "{{ .Values.reloader.custom_annotations.match }}"
+ {{- end }}
+ {{- if .Values.reloader.custom_annotations.pausePeriod }}
+ - "--pause-deployment-annotation"
+ - "{{ .Values.reloader.custom_annotations.pausePeriod }}"
+ {{- end }}
+ {{- if .Values.reloader.custom_annotations.pauseTime }}
+ - "--pause-deployment-time-annotation"
+ - "{{ .Values.reloader.custom_annotations.pauseTime }}"
{{- end }}
{{- if .Values.reloader.webhookUrl }}
- "--webhook-url"
diff --git a/deployments/kubernetes/chart/reloader/templates/networkpolicy.yaml b/deployments/kubernetes/chart/reloader/templates/networkpolicy.yaml
index ca5f224..ecb2c6a 100644
--- a/deployments/kubernetes/chart/reloader/templates/networkpolicy.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/networkpolicy.yaml
@@ -7,17 +7,16 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
podSelector:
matchLabels:
- app: {{ template "reloader-fullname" . }}
- release: {{ .Release.Name | quote }}
+{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 6 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}
policyTypes:
- Ingress
diff --git a/deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml b/deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml
index e13c478..7f877ca 100644
--- a/deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml
@@ -13,5 +13,5 @@ spec:
{{- end }}
selector:
matchLabels:
- app: {{ template "reloader-fullname" . }}
+ {{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}
diff --git a/deployments/kubernetes/chart/reloader/templates/podmonitor.yaml b/deployments/kubernetes/chart/reloader/templates/podmonitor.yaml
index 7afeba3..3af66dc 100644
--- a/deployments/kubernetes/chart/reloader/templates/podmonitor.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/podmonitor.yaml
@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
- {{ include "reloader-labels.chart" . | nindent 6 }}
+ {{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}
diff --git a/deployments/kubernetes/chart/reloader/templates/role.yaml b/deployments/kubernetes/chart/reloader/templates/role.yaml
index 13ac4bb..70a6815 100644
--- a/deployments/kubernetes/chart/reloader/templates/role.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/role.yaml
@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -101,3 +101,34 @@ rules:
- create
- patch
{{- end }}
+
+---
+
+{{- if .Values.reloader.rbac.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+ labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.rbac.labels }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
+{{- end }}
+{{- if .Values.reloader.matchLabels }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
+{{- end }}
+ name: {{ template "reloader-fullname" . }}-metadata-role
+ namespace: {{ .Values.namespace | default .Release.Namespace }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - get
+ - watch
+ - create
+ - update
+{{- end }}
\ No newline at end of file
diff --git a/deployments/kubernetes/chart/reloader/templates/rolebinding.yaml b/deployments/kubernetes/chart/reloader/templates/rolebinding.yaml
index abeb721..5cf4cf3 100644
--- a/deployments/kubernetes/chart/reloader/templates/rolebinding.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/rolebinding.yaml
@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -27,3 +27,30 @@ subjects:
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
+
+---
+{{- if .Values.reloader.rbac.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+ labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.rbac.labels }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
+{{- end }}
+{{- if .Values.reloader.matchLabels }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
+{{- end }}
+ name: {{ template "reloader-fullname" . }}-metadata-role-binding
+ namespace: {{ .Values.namespace | default .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "reloader-fullname" . }}-metadata-role
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "reloader-serviceAccountName" . }}
+ namespace: {{ .Values.namespace | default .Release.Namespace }}
+{{- end }}
\ No newline at end of file
diff --git a/deployments/kubernetes/chart/reloader/templates/service.yaml b/deployments/kubernetes/chart/reloader/templates/service.yaml
index 95a8150..57bf63c 100644
--- a/deployments/kubernetes/chart/reloader/templates/service.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/service.yaml
@@ -5,22 +5,22 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
-{{ toYaml .Values.reloader.service.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.service.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.service.labels }}
-{{ toYaml .Values.reloader.service.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.service.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
ports:
- port: {{ .Values.reloader.service.port }}
diff --git a/deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml b/deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml
index 27909ed..7bc5ccb 100644
--- a/deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml
@@ -11,15 +11,15 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.annotations }}
-{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.serviceAccount.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.labels }}
-{{ toYaml .Values.reloader.serviceAccount.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.serviceAccount.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
diff --git a/deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml b/deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml
index c4685fd..cdd3542 100644
--- a/deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml
+++ b/deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml
@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
- {{ include "reloader-labels.chart" . | nindent 6 }}
+ {{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}
diff --git a/deployments/kubernetes/chart/reloader/tests/deployment_test.yaml b/deployments/kubernetes/chart/reloader/tests/deployment_test.yaml
index aee0f9f..2838fe4 100644
--- a/deployments/kubernetes/chart/reloader/tests/deployment_test.yaml
+++ b/deployments/kubernetes/chart/reloader/tests/deployment_test.yaml
@@ -61,3 +61,44 @@ tests:
valueFrom:
fieldRef:
fieldPath: metadata.name
+
+ - it: sets ignored-workload-types argument when ignoreJobs is true
+ set:
+ reloader:
+ ignoreJobs: true
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--ignored-workload-types=jobs"
+
+ - it: sets ignored-workload-types argument when ignoreCronJobs is true
+ set:
+ reloader:
+ ignoreCronJobs: true
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--ignored-workload-types=cronjobs"
+
+ - it: sets ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are true
+ set:
+ reloader:
+ ignoreJobs: true
+ ignoreCronJobs: true
+ asserts:
+ - contains:
+ path: spec.template.spec.containers[0].args
+ content: "--ignored-workload-types=jobs,cronjobs"
+
+ - it: does not set ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are false
+ set:
+ reloader:
+ ignoreJobs: false
+ ignoreCronJobs: false
+ asserts:
+ - notContains:
+ path: spec.template.spec.containers[0].args
+ content: "--ignored-workload-types=jobs"
+ - notContains:
+ path: spec.template.spec.containers[0].args
+ content: "--ignored-workload-types=cronjobs"
diff --git a/deployments/kubernetes/chart/reloader/values.yaml b/deployments/kubernetes/chart/reloader/values.yaml
index 5766a7c..8bde058 100644
--- a/deployments/kubernetes/chart/reloader/values.yaml
+++ b/deployments/kubernetes/chart/reloader/values.yaml
@@ -14,12 +14,25 @@ kubernetes:
nameOverride: ""
fullnameOverride: ""
+image:
+ name: stakater/reloader
+ repository: ghcr.io/stakater/reloader
+ tag: v1.4.10
+ # digest: sha256:1234567
+ pullPolicy: IfNotPresent
+
reloader:
autoReloadAll: false
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
+ # Set to true to exclude Job workloads from automatic reload monitoring
+ # Useful when you don't want Jobs to be restarted when their referenced ConfigMaps/Secrets change
+ ignoreJobs: false
+ # Set to true to exclude CronJob workloads from automatic reload monitoring
+ # Useful when you don't want CronJobs to be restarted when their referenced ConfigMaps/Secrets change
+ ignoreCronJobs: false
reloadOnCreate: false
reloadOnDelete: false
syncAfterRestart: false
@@ -32,6 +45,10 @@ reloader:
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
+ # Set to true to enable pprof for profiling
+ enablePProf: false
+ # Address to start pprof server on. Default is ":6060"
+ pprofAddr: ":6060"
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
@@ -40,6 +57,19 @@ reloader:
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
+ # Specifies the deployment DNS configuration.
+ dnsConfig: {}
+ # nameservers:
+ # - 1.2.3.4
+ # searches:
+ # - ns1.svc.cluster-domain.example
+ # - my.dns.search.suffix
+ # options:
+ # - name: ndots
+ # value: "1"
+ # - name: attempts
+ # value: "3"
+
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1
@@ -59,13 +89,17 @@ reloader:
# operator: "Exists"
affinity: {}
+ volumeMounts: []
+ volumes: []
+
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
- containerSecurityContext: {}
+ containerSecurityContext:
+ {}
# capabilities:
# drop:
# - ALL
@@ -89,20 +123,14 @@ reloader:
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
- # app: my-app
+ # app.kubernetes.io/instance: my-app
topologySpreadConstraints: []
annotations: {}
labels:
provider: stakater
group: com.stakater.platform
- version: v1.2.1
- image:
- name: ghcr.io/stakater/reloader
- base: stakater/reloader
- tag: v1.2.1
- # digest: sha256:1234567
- pullPolicy: IfNotPresent
+ version: v1.4.10
# Support for extra environment variables.
env:
# Open supports Key value pair as environment variables.
@@ -158,7 +186,8 @@ reloader:
gomaxprocsOverride: ""
gomemlimitOverride: ""
- service: {}
+ service:
+ {}
# labels: {}
# annotations: {}
@@ -332,8 +361,4 @@ reloader:
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
- volumeMounts: []
-
- volumes: []
-
webhookUrl: ""
diff --git a/deployments/kubernetes/kustomization.yaml b/deployments/kubernetes/kustomization.yaml
index b4b7efb..9bc6038 100644
--- a/deployments/kubernetes/kustomization.yaml
+++ b/deployments/kubernetes/kustomization.yaml
@@ -6,3 +6,4 @@ resources:
- manifests/clusterrolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
+ - manifests/role.yaml
diff --git a/deployments/kubernetes/manifests/deployment.yaml b/deployments/kubernetes/manifests/deployment.yaml
index fb80fa4..da34ab1 100644
--- a/deployments/kubernetes/manifests/deployment.yaml
+++ b/deployments/kubernetes/manifests/deployment.yaml
@@ -17,7 +17,7 @@ spec:
app: reloader-reloader
spec:
containers:
- - image: "ghcr.io/stakater/reloader:v1.1.0"
+ - image: "ghcr.io/stakater/reloader:v1.4.10"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:
@@ -31,6 +31,13 @@ spec:
resourceFieldRef:
resource: limits.memory
divisor: '1'
+ - name: RELOADER_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+
+ - name: RELOADER_DEPLOYMENT_NAME
+ value: reloader-reloader
ports:
- name: http
containerPort: 9090
diff --git a/deployments/kubernetes/manifests/role.yaml b/deployments/kubernetes/manifests/role.yaml
new file mode 100644
index 0000000..0224ab9
--- /dev/null
+++ b/deployments/kubernetes/manifests/role.yaml
@@ -0,0 +1,32 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: reloader-reloader-metadata-role
+ namespace: default
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - get
+ - watch
+ - create
+ - update
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: reloader-reloader-metadata-rolebinding
+ namespace: default
+subjects:
+ - kind: ServiceAccount
+ name: reloader-reloader
+ namespace: default
+roleRef:
+ kind: Role
+ name: reloader-reloader-metadata-role
+ apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/deployments/kubernetes/reloader.yaml b/deployments/kubernetes/reloader.yaml
index 254420b..4b25b07 100644
--- a/deployments/kubernetes/reloader.yaml
+++ b/deployments/kubernetes/reloader.yaml
@@ -5,6 +5,23 @@ metadata:
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: reloader-reloader-metadata-role
+ namespace: default
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - list
+ - get
+ - watch
+ - create
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: reloader-reloader-role
@@ -64,6 +81,20 @@ rules:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: reloader-reloader-metadata-rolebinding
+ namespace: default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: reloader-reloader-metadata-role
+subjects:
+- kind: ServiceAccount
+ name: reloader-reloader
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: reloader-reloader-role-binding
@@ -104,7 +135,13 @@ spec:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- image: "ghcr.io/stakater/reloader:latest"
+ - name: RELOADER_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: RELOADER_DEPLOYMENT_NAME
+ value: reloader-reloader
+ image: ghcr.io/stakater/reloader:v1.4.10
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
diff --git a/docs/Alerting.md b/docs/Alerting.md
index 43eefe5..bb4fbbe 100644
--- a/docs/Alerting.md
+++ b/docs/Alerting.md
@@ -8,7 +8,7 @@ In-order to enable this feature, you need to update the `reloader.env.secret` se
```yaml
ALERT_ON_RELOAD: [ true/false ] Default: false
- ALERT_SINK: [ slack/teams/webhook ] Default: webhook
+ ALERT_SINK: [ slack/teams/gchat/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
```
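+
+For instance, a hedged sketch wiring these through the chart's `reloader.env.secret` map (the sink and URL are placeholders, and the exact values layout is an assumption):
+
+```bash
+helm upgrade --install reloader stakater/reloader \
+  --set-string reloader.env.secret.ALERT_ON_RELOAD=true \
+  --set-string reloader.env.secret.ALERT_SINK=slack \
+  --set-string reloader.env.secret.ALERT_WEBHOOK_URL='<webhook-url>'
+```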
diff --git a/docs/How-it-works.md b/docs/How-it-works.md
index a93c9fb..c0ae964 100644
--- a/docs/How-it-works.md
+++ b/docs/How-it-works.md
@@ -15,18 +15,18 @@ flowchart LR
## How Does Change Detection Work?
-Reloader watches changes in `configmaps` and `secrets` data. As soon as it detects a change in these. It forwards these objects to an update handler which decides if and how to perform the rolling upgrade.
+Reloader watches changes in `ConfigMaps` and `Secrets` data. As soon as it detects a change in these, it forwards the objects to an update handler, which decides if and how to perform the rolling upgrade.
## Requirements for Rolling Upgrade
To perform rolling upgrade a `deployment`, `daemonset` or `statefulset` must have
- support for rolling upgrade strategy
-- specific annotation for `configmaps` or `secrets`
+- specific annotation for `ConfigMaps` or `Secrets`
-The annotation value is comma separated list of `configmaps` or `secrets`. If a change is detected in data of these `configmaps` or `secrets`, Reloader will perform rolling upgrades on their associated `deployments`, `daemonsets` or `statefulsets`.
+The annotation value is a comma-separated list of `ConfigMaps` or `Secrets`. If a change is detected in the data of these `ConfigMaps` or `Secrets`, Reloader will perform rolling upgrades on their associated `deployments`, `daemonsets` or `statefulsets`.
-### Annotation for Configmap
+### Annotation for ConfigMap
For a `Deployment` called `foo` that has a `ConfigMap` called `foo`, add this annotation* to your `Deployment`, where the default annotation can be changed with the `--configmap-annotation` flag:
@@ -50,13 +50,13 @@ Above mentioned annotation are also work for `Daemonsets` `Statefulsets` and `Ro
## How Does Rolling Upgrade Work?
-When Reloader detects changes in configmap. It gets two objects of configmap. First object is an old configmap object which has a state before the latest change. Second object is new configmap object which contains latest changes. Reloader compares both objects and see whether any change in data occurred or not. If Reloader finds any change in new configmap object, only then, it moves forward with rolling upgrade.
+When Reloader detects a change in a `ConfigMap`, it gets two `ConfigMap` objects: the first is the old `ConfigMap` object, with the state before the latest change; the second is the new `ConfigMap` object, containing the latest changes. Reloader compares both objects to see whether any change in data occurred. Only if it finds a change in the new `ConfigMap` object does it move forward with the rolling upgrade.
-After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulset` and looks for above mentioned annotation for configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.
+After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulsets` and looks for the above-mentioned annotation for the `ConfigMap`. If the annotation value contains the `ConfigMap` name, it then looks for an environment variable which can contain the `ConfigMap` or `Secret` data change hash.
-### Environment Variable for Configmap
+### Environment Variable for ConfigMap
-If configmap name is foo then
+If the `ConfigMap` name is `foo` then
```yaml
STAKATER_FOO_CONFIGMAP
@@ -70,7 +70,7 @@ If Secret name is foo then
STAKATER_FOO_SECRET
```
-If the environment variable is found then it gets its value and compares it with new configmap hash value. If old value in environment variable is different from new hash value then Reloader updates the environment variable. If the environment variable does not exist then it creates a new environment variable with latest hash value from configmap and updates the relevant `deployment`, `daemonset` or `statefulset`
+If the environment variable is found, Reloader gets its value and compares it with the new `ConfigMap` hash value. If the old value in the environment variable differs from the new hash value, Reloader updates the environment variable. If the environment variable does not exist, it creates a new environment variable with the latest hash value from the `ConfigMap` and updates the relevant `deployment`, `daemonset` or `statefulset`.
Note: Rolling upgrade also works in the same way for secrets.
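+
+For example, a hedged one-liner (assuming a `Deployment` named `foo` whose first container carries the variable) to inspect the injected value:
+
+```bash
+kubectl get deployment foo -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="STAKATER_FOO_CONFIGMAP")].value}'
+```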
@@ -90,4 +90,4 @@ The output file can then be used to deploy Reloader in specific namespace.
## Compatibility With Helm Install and Upgrade
-Reloader has no impact on helm deployment cycle. Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of configmap's or secret's data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, Reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`.
+Reloader has no impact on the Helm deployment cycle. Reloader only injects an environment variable in the `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of the `ConfigMaps` or `Secrets` data. So if a deployment is created using Helm and Reloader updates the deployment, then the next time you upgrade the Helm release, Reloader will do nothing except change that environment variable value in the `deployment`, `daemonset` or `statefulset`.
diff --git a/docs/Reloader-vs-ConfigmapController.md b/docs/Reloader-vs-ConfigmapController.md
index 810e611..f866f89 100644
--- a/docs/Reloader-vs-ConfigmapController.md
+++ b/docs/Reloader-vs-ConfigmapController.md
@@ -1,11 +1,11 @@
# Reloader vs ConfigmapController
-Reloader is inspired from [`Configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapController`. Below is the small comparison between these two controllers.
+Reloader is inspired by [`configmapcontroller`](https://github.com/fabric8io/configmapcontroller), but it differs from `configmapcontroller` in many ways. Below is a small comparison between these two controllers.
-| Reloader | Configmap |
+| Reloader | configmapcontroller |
|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
-| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
-| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
-| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
-| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
+| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. |
+| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets`. | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets`. |
+| Reloader provides both unit tests and end-to-end integration tests, so one can make sure that new changes do not break any old functionality. | Currently there are no unit tests or end-to-end integration tests in `configmapcontroller`. This makes further updates to `configmapcontroller` harder, and one cannot know for sure whether new changes break any old functionality. |
+| Reloader uses SHA1 to encode the change in a `ConfigMap` or `Secret`. It then saves the SHA1 value in a `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable, depending upon where the change has happened. SHA1 provides a concise 40-character encoded value that is far less prone to collisions. | `configmapcontroller` uses a `FABRIC8_FOO_REVISION` environment variable to store any change in a `ConfigMap`. It does not encode or hash the value to avoid data pollution in the deployment. |
+| Reloader allows you to customize your own annotations (for both `Secrets` and `ConfigMaps`) using command line flags. | `configmapcontroller` restricts you to its provided annotation. |
diff --git a/docs/Reloader-vs-k8s-trigger-controller.md b/docs/Reloader-vs-k8s-trigger-controller.md
index 4a517fd..811987a 100644
--- a/docs/Reloader-vs-k8s-trigger-controller.md
+++ b/docs/Reloader-vs-k8s-trigger-controller.md
@@ -4,7 +4,7 @@ Reloader and k8s-trigger-controller are both built for same purpose. So there ar
## Similarities
-- Both controllers support change detection in configmap and secrets
+- Both controllers support change detection in `ConfigMaps` and `Secrets`
- Both controllers support deployment `rollout`
- Both controllers use SHA1 for hashing
- Both controllers have end to end as well as unit test cases.
diff --git a/docs/index.md b/docs/index.md
index 9cb508c..1197186 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -4,9 +4,23 @@ Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades o
These are the key features of Reloader:
-1. Restart pod in a `deployment` on change in linked/related configmap's or secret's
-1. Restart pod in a `daemonset` on change in linked/related configmap's or secret's
-1. Restart pod in a `statefulset` on change in linked/related configmap's or secret's
-1. Restart pod in a `rollout` on change in linked/related configmap's or secret's
+1. Restart pods in a `deployment` on a change in linked/related `ConfigMaps` or `Secrets`
+1. Restart pods in a `daemonset` on a change in linked/related `ConfigMaps` or `Secrets`
+1. Restart pods in a `statefulset` on a change in linked/related `ConfigMaps` or `Secrets`
+1. Restart pods in a `rollout` on a change in linked/related `ConfigMaps` or `Secrets`
This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md).
+
+---
+
+[Sponsor Stakater](https://github.com/sponsors/stakater?utm_source=docs&utm_medium=footer&utm_campaign=reloader)
+
+Your support funds maintenance, security updates, and new features for Reloader, plus continued investment in other open source tools.
+
+---
diff --git a/go.mod b/go.mod
index fcc49ae..c6bc3f1 100644
--- a/go.mod
+++ b/go.mod
@@ -1,21 +1,22 @@
module github.com/stakater/Reloader
-go 1.23.1
+go 1.25.3
require (
- github.com/argoproj/argo-rollouts v1.7.2
- github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f
- github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba
+ github.com/argoproj/argo-rollouts v1.8.3
+ github.com/openshift/api v0.0.0-20250411135543-10a8fa583797
+ github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2
github.com/parnurzeal/gorequest v0.3.0
- github.com/prometheus/client_golang v1.20.5
+ github.com/prometheus/client_golang v1.22.0
github.com/sirupsen/logrus v1.9.3
- github.com/spf13/cobra v1.8.1
- github.com/stretchr/testify v1.9.0
- k8s.io/api v0.31.1
- k8s.io/apimachinery v0.31.1
- k8s.io/client-go v0.31.1
- k8s.io/kubectl v0.31.1
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ github.com/spf13/cobra v1.10.1
+ github.com/stretchr/testify v1.10.0
+ k8s.io/api v0.32.3
+ k8s.io/apimachinery v0.32.3
+ k8s.io/client-go v0.32.3
+ k8s.io/kubectl v0.32.3
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
+ sigs.k8s.io/secrets-store-csi-driver v1.5.4
)
require (
@@ -23,63 +24,59 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect
- github.com/emicklei/go-restful/v3 v3.11.0 // indirect
- github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/fxamacker/cbor/v2 v2.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/moul/http2curl v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.63.0 // indirect
+ github.com/prometheus/procfs v0.16.0 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/pflag v1.0.9 // indirect
github.com/x448/float16 v0.8.4 // indirect
- golang.org/x/net v0.33.0 // indirect
- golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
- golang.org/x/term v0.27.0 // indirect
- golang.org/x/text v0.21.0 // indirect
- golang.org/x/time v0.3.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ golang.org/x/net v0.39.0 // indirect
+ golang.org/x/oauth2 v0.29.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/term v0.31.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
+ golang.org/x/time v0.11.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/secrets-store-csi-driver v1.4.7 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
// Replacements for argo-rollouts
replace (
github.com/go-check/check => github.com/go-check/check v0.0.0-20201130134442-10cb98267c6c
- k8s.io/api v0.0.0 => k8s.io/api v0.28.4
- k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.28.4
- k8s.io/client-go v0.0.0 => k8s.io/client-go v0.27.4
+ k8s.io/api v0.0.0 => k8s.io/api v0.32.3
+ k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.32.3
+ k8s.io/client-go v0.0.0 => k8s.io/client-go v0.32.3
k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
@@ -88,7 +85,7 @@ replace (
k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
- k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.1
+ k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.32.3
k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
diff --git a/go.sum b/go.sum
index 5de1fc0..05738d7 100644
--- a/go.sum
+++ b/go.sum
@@ -1,176 +1,86 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/argoproj/argo-rollouts v1.7.2 h1:faDUH/qePerYRwsrHfVzNQkhjGBgXIiVYdVK8824kMo=
-github.com/argoproj/argo-rollouts v1.7.2/go.mod h1:Te4HrUELxKiBpK8lgk77o4gTa3mv8pXCd8xdPprKrbs=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA=
+github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
-github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
-github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
-github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
-github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0=
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
+github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
-github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f h1:v/UGegormU7y/1hMpt52McJtlBrsLgXpySOesXWFQVg=
-github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw=
-github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba h1:uZ9gqdJIKUegxqeBqKXbPdd0JfO6aueQ2Ot/gTOhkD8=
-github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba/go.mod h1:/BACtJX3fnHOlecTC3VW7JPsJU7KCGaUqt/HkWp5ryo=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I=
+github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ=
+github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo=
github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI=
github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -178,237 +88,114 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
-k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
-k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
-k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
-k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
-k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
-k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
-k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24=
-k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/secrets-store-csi-driver v1.4.7 h1:AyuwmPTW2GoPD2RjyVD3OrH1J9cdPZx+0h2qJvzbGXs=
-sigs.k8s.io/secrets-store-csi-driver v1.4.7/go.mod h1:0/wMVOv8qLx7YNVMGU+Sh7S4D6TH6GhyEpouo28OTUU=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
+k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/secrets-store-csi-driver v1.5.4 h1:enl+v1+JbKDyVjdfT/7CillZsc4rLAM9tTHyf7GeLxc=
+sigs.k8s.io/secrets-store-csi-driver v1.5.4/go.mod h1:Ct85xqsKLk/dxkj8inRjWA3RJsXXkPLjNSAJ0db5vKs=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/internal/pkg/alerts/alert.go b/internal/pkg/alerts/alert.go
index 43b4c06..6b9568f 100644
--- a/internal/pkg/alerts/alert.go
+++ b/internal/pkg/alerts/alert.go
@@ -9,6 +9,15 @@ import (
"github.com/sirupsen/logrus"
)
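+
+// AlertSink identifies the configured alert destination; values that do not
+// match a known sink fall back to the raw webhook sender.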
+type AlertSink string
+
+const (
+ AlertSinkSlack AlertSink = "slack"
+ AlertSinkTeams AlertSink = "teams"
+ AlertSinkGoogleChat AlertSink = "gchat"
+ AlertSinkRaw AlertSink = "raw"
+)
+
// function to send alert msg to webhook service
func SendWebhookAlert(msg string) {
webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
@@ -31,12 +40,15 @@ func SendWebhookAlert(msg string) {
msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
}
- if alert_sink == "slack" {
+ switch AlertSink(alert_sink) {
+ case AlertSinkSlack:
sendSlackAlert(webhook_url, webhook_proxy, msg)
- } else if alert_sink == "teams" {
+ case AlertSinkTeams:
sendTeamsAlert(webhook_url, webhook_proxy, msg)
- } else {
- msg = strings.Replace(msg, "*", "", -1)
+ case AlertSinkGoogleChat:
+ sendGoogleChatAlert(webhook_url, webhook_proxy, msg)
+ default:
+ msg = strings.ReplaceAll(msg, "*", "")
sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
}
}
@@ -98,6 +110,29 @@ func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error {
return nil
}
+// function to send alert to Google Chat webhook
+func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error {
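+	// Google Chat incoming webhooks accept a minimal JSON payload of the form {"text": "..."}.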
+ payload := map[string]interface{}{
+ "text": msg,
+ }
+
+ request := gorequest.New().Proxy(proxy)
+ resp, _, err := request.
+ Post(webhookUrl).
+ RedirectPolicy(redirectPolicy).
+ Send(payload).
+ End()
+
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 200 {
+ return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
+ }
+
+ return nil
+}
+
// function to send alert to webhook service as text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
request := gorequest.New().Proxy(proxy)
diff --git a/internal/pkg/callbacks/rolling_upgrade.go b/internal/pkg/callbacks/rolling_upgrade.go
index 4ef9159..13e5a63 100644
--- a/internal/pkg/callbacks/rolling_upgrade.go
+++ b/internal/pkg/callbacks/rolling_upgrade.go
@@ -2,6 +2,7 @@ package callbacks
import (
"context"
+ "errors"
"fmt"
"time"
@@ -15,10 +16,14 @@ import (
"k8s.io/apimachinery/pkg/runtime"
patchtypes "k8s.io/apimachinery/pkg/types"
+ "maps"
+
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- openshiftv1 "github.com/openshift/api/apps/v1"
)
+// ItemFunc is a generic function to return a specific resource in given namespace
+type ItemFunc func(kube.Clients, string, string) (runtime.Object, error)
+
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object
@@ -34,6 +39,12 @@ type VolumesFunc func(runtime.Object) []v1.Volume
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error
+// PatchFunc performs the resource patch
+type PatchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
+
+// PatchTemplatesFunc is a generic func to return the strategic merge and JSON patch templates
+type PatchTemplatesFunc func() PatchTemplates
+
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string
@@ -42,14 +53,42 @@ type PodAnnotationsFunc func(runtime.Object) map[string]string
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
- ItemsFunc ItemsFunc
- AnnotationsFunc AnnotationsFunc
- PodAnnotationsFunc PodAnnotationsFunc
- ContainersFunc ContainersFunc
- InitContainersFunc InitContainersFunc
- UpdateFunc UpdateFunc
- VolumesFunc VolumesFunc
- ResourceType string
+ ItemFunc ItemFunc
+ ItemsFunc ItemsFunc
+ AnnotationsFunc AnnotationsFunc
+ PodAnnotationsFunc PodAnnotationsFunc
+ ContainersFunc ContainersFunc
+ ContainerPatchPathFunc ContainersFunc
+ InitContainersFunc InitContainersFunc
+ UpdateFunc UpdateFunc
+ PatchFunc PatchFunc
+ PatchTemplatesFunc PatchTemplatesFunc
+ VolumesFunc VolumesFunc
+ ResourceType string
+ SupportsPatch bool
+}
+
+// PatchTemplates contains the strategic merge and JSON patch templates used to build reload patches
+type PatchTemplates struct {
+ AnnotationTemplate string
+ EnvVarTemplate string
+ DeleteEnvVarTemplate string
+}
+
+// GetDeploymentItem returns the deployment in given namespace
+func GetDeploymentItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ deployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get deployment %v", err)
+ return nil, err
+ }
+
+ if deployment.Spec.Template.Annotations == nil {
+ annotations := make(map[string]string)
+ deployment.Spec.Template.Annotations = annotations
+ }
+
+ return deployment, nil
}
// GetDeploymentItems returns the deployments in given namespace
@@ -62,9 +101,9 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
items := make([]runtime.Object, len(deployments.Items))
// Ensure we always have pod annotations to add to
for i, v := range deployments.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
+ if v.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
- deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+ deployments.Items[i].Spec.Template.Annotations = annotations
}
items[i] = &deployments.Items[i]
}
@@ -72,6 +111,17 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
return items
}
+// GetCronJobItem returns the cronjob in given namespace
+func GetCronJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ cronjob, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get cronjob %v", err)
+ return nil, err
+ }
+
+ return cronjob, nil
+}
+
// GetCronJobItems returns the jobs in given namespace
func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -82,9 +132,9 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(cronjobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range cronjobs.Items {
- if v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
+ if v.Spec.JobTemplate.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
- cronjobs.Items[i].Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
+ cronjobs.Items[i].Spec.JobTemplate.Spec.Template.Annotations = annotations
}
items[i] = &cronjobs.Items[i]
}
@@ -92,6 +142,17 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
+// GetJobItem returns the job in given namespace
+func GetJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ job, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get job %v", err)
+ return nil, err
+ }
+
+ return job, nil
+}
+
// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -102,9 +163,9 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(jobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range jobs.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
+ if v.Spec.Template.Annotations == nil {
annotations := make(map[string]string)
- jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+ jobs.Items[i].Spec.Template.Annotations = annotations
}
items[i] = &jobs.Items[i]
}
@@ -112,6 +173,17 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
+// GetDaemonSetItem returns the daemonSet in given namespace
+func GetDaemonSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ daemonSet, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get daemonSet %v", err)
+ return nil, err
+ }
+
+ return daemonSet, nil
+}
+
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -122,8 +194,8 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
items := make([]runtime.Object, len(daemonSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range daemonSets.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
- daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+ if v.Spec.Template.Annotations == nil {
+ daemonSets.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &daemonSets.Items[i]
}
@@ -131,6 +203,17 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
return items
}
+// GetStatefulSetItem returns the statefulSet in given namespace
+func GetStatefulSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ statefulSet, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get statefulSet %v", err)
+ return nil, err
+ }
+
+ return statefulSet, nil
+}
+
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -141,8 +224,8 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
items := make([]runtime.Object, len(statefulSets.Items))
// Ensure we always have pod annotations to add to
for i, v := range statefulSets.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
- statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+ if v.Spec.Template.Annotations == nil {
+ statefulSets.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &statefulSets.Items[i]
}
@@ -150,23 +233,15 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
return items
}
-// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
-func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
- deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
+// GetRolloutItem returns the rollout in given namespace
+func GetRolloutItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
+ rollout, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
- logrus.Errorf("Failed to list deploymentConfigs %v", err)
+ logrus.Errorf("Failed to get Rollout %v", err)
+ return nil, err
}
- items := make([]runtime.Object, len(deploymentConfigs.Items))
- // Ensure we always have pod annotations to add to
- for i, v := range deploymentConfigs.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
- deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
- }
- items[i] = &deploymentConfigs.Items[i]
- }
-
- return items
+ return rollout, nil
}
// GetRolloutItems returns the rollouts in given namespace
@@ -179,8 +254,8 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
items := make([]runtime.Object, len(rollouts.Items))
// Ensure we always have pod annotations to add to
for i, v := range rollouts.Items {
- if v.Spec.Template.ObjectMeta.Annotations == nil {
- rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+ if v.Spec.Template.Annotations == nil {
+ rollouts.Items[i].Spec.Template.Annotations = make(map[string]string)
}
items[i] = &rollouts.Items[i]
}
@@ -190,72 +265,98 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.Deployment).ObjectMeta.Annotations
+ if item.(*appsv1.Deployment).Annotations == nil {
+ item.(*appsv1.Deployment).Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.Deployment).Annotations
}
// GetCronJobAnnotations returns the annotations of given cronjob
func GetCronJobAnnotations(item runtime.Object) map[string]string {
- return item.(*batchv1.CronJob).ObjectMeta.Annotations
+ if item.(*batchv1.CronJob).Annotations == nil {
+ item.(*batchv1.CronJob).Annotations = make(map[string]string)
+ }
+ return item.(*batchv1.CronJob).Annotations
}
// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
- return item.(*batchv1.Job).ObjectMeta.Annotations
+ if item.(*batchv1.Job).Annotations == nil {
+ item.(*batchv1.Job).Annotations = make(map[string]string)
+ }
+ return item.(*batchv1.Job).Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
+ if item.(*appsv1.DaemonSet).Annotations == nil {
+ item.(*appsv1.DaemonSet).Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.DaemonSet).Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
-}
-
-// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
-func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
- return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
+ if item.(*appsv1.StatefulSet).Annotations == nil {
+ item.(*appsv1.StatefulSet).Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.StatefulSet).Annotations
}
// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item runtime.Object) map[string]string {
- return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
+ if item.(*argorolloutv1alpha1.Rollout).Annotations == nil {
+ item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string)
+ }
+ return item.(*argorolloutv1alpha1.Rollout).Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
+ if item.(*appsv1.Deployment).Spec.Template.Annotations == nil {
+ item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.Deployment).Spec.Template.Annotations
}
// GetCronJobPodAnnotations returns the pod's annotations of given cronjob
func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
- return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
+ if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil {
+ item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations
}
// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
- return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
+ if item.(*batchv1.Job).Spec.Template.Annotations == nil {
+ item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*batchv1.Job).Spec.Template.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
+ if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil {
+ item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.DaemonSet).Spec.Template.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
- return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
-}
-
-// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
-func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
- return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
+ if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil {
+ item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*appsv1.StatefulSet).Spec.Template.Annotations
}
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
- return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
+ if item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil {
+ item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = make(map[string]string)
+ }
+ return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations
}
// GetDeploymentContainers returns the containers of given deployment
@@ -283,11 +384,6 @@ func GetStatefulSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}
-// GetDeploymentConfigContainers returns the containers of given deploymentConfig
-func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
- return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
-}
-
// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
@@ -318,16 +414,20 @@ func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
-// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
-func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
- return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
-}
-
// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}
+// GetPatchTemplates returns patch templates
+func GetPatchTemplates() PatchTemplates {
+ return PatchTemplates{
+ AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, // strategic merge patch
+ EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, // strategic merge patch
+ DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, // JSON patch
+ }
+}
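+
+// For example, filling the annotation template with fmt.Sprintf (the key and
+// value below are purely illustrative) produces a strategic merge patch that
+// touches only the pod template annotations:
+//
+//	fmt.Sprintf(GetPatchTemplates().AnnotationTemplate, "example.com/reloaded-from", "configmap-sha")
+//	// => {"spec":{"template":{"metadata":{"annotations":{"example.com/reloaded-from":"configmap-sha"}}}}}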
+
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
deployment := resource.(*appsv1.Deployment)
@@ -335,18 +435,39 @@ func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.O
return err
}
+// PatchDeployment applies the given patch to the deployment to perform a rolling upgrade
+func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ deployment := resource.(*appsv1.Deployment)
+ _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
+ return err
+}
+
// CreateJobFromCronjob performs rolling upgrade on cronjob
func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error {
cronJob := resource.(*batchv1.CronJob)
+
+ annotations := make(map[string]string)
+ annotations["cronjob.kubernetes.io/instantiate"] = "manual"
+ maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations)
+
job := &batchv1.Job{
- ObjectMeta: cronJob.Spec.JobTemplate.ObjectMeta,
- Spec: cronJob.Spec.JobTemplate.Spec,
+ ObjectMeta: meta_v1.ObjectMeta{
+ GenerateName: cronJob.Name + "-",
+ Namespace: cronJob.Namespace,
+ Annotations: annotations,
+ Labels: cronJob.Spec.JobTemplate.Labels,
+ OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))},
+ },
+ Spec: cronJob.Spec.JobTemplate.Spec,
}
- job.GenerateName = cronJob.Name + "-"
_, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
return err
}
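+
+// PatchCronJob is a stub: patching is not supported for CronJobs; CreateJobFromCronjob handles their reloads instead.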
+func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ return errors.New("not supported patching: CronJob")
+}
+
// ReCreateJobFromjob performs rolling upgrade on job
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
oldJob := resource.(*batchv1.Job)
@@ -360,9 +481,9 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
}
// Remove fields that should not be specified when creating a new Job
- job.ObjectMeta.ResourceVersion = ""
- job.ObjectMeta.UID = ""
- job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
+ job.ResourceVersion = ""
+ job.UID = ""
+ job.CreationTimestamp = meta_v1.Time{}
job.Status = batchv1.JobStatus{}
// Remove problematic labels
@@ -379,6 +500,10 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
return err
}
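+
+// PatchJob is a stub: patching is not supported for Jobs; ReCreateJobFromjob recreates the Job instead.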
+func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ return errors.New("not supported patching: Job")
+}
+
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
daemonSet := resource.(*appsv1.DaemonSet)
@@ -386,6 +511,12 @@ func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Ob
return err
}
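+
+// PatchDaemonSet applies the given patch to the daemonSet to perform a rolling upgrade.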
+func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ daemonSet := resource.(*appsv1.DaemonSet)
+ _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
+ return err
+}
+
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
statefulSet := resource.(*appsv1.StatefulSet)
@@ -393,18 +524,17 @@ func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.
return err
}
-// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
-func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
- deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
- _, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
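+// PatchStatefulSet applies the given patch to the statefulSet to perform a rolling upgrade.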
+func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ statefulSet := resource.(*appsv1.StatefulSet)
+ _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
- var err error
rollout := resource.(*argorolloutv1alpha1.Rollout)
strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
+ var err error
switch options.ToArgoRolloutStrategy(strategy) {
case options.RestartStrategy:
_, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
@@ -414,6 +544,10 @@ func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Obje
return err
}
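+
+// PatchRollout is a stub: patching is not supported for Rollouts; UpdateRollout handles their reloads via the strategy annotation.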
+func PatchRollout(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ return errors.New("not supported patching: Rollout")
+}
+
// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
@@ -439,11 +573,6 @@ func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}
-// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
-func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
- return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
-}
-
// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
diff --git a/internal/pkg/callbacks/rolling_upgrade_test.go b/internal/pkg/callbacks/rolling_upgrade_test.go
index d358e21..452867f 100644
--- a/internal/pkg/callbacks/rolling_upgrade_test.go
+++ b/internal/pkg/callbacks/rolling_upgrade_test.go
@@ -3,6 +3,7 @@ package callbacks_test
import (
"context"
"fmt"
+ "strings"
"testing"
"time"
@@ -10,7 +11,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
- meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@@ -18,6 +19,7 @@ import (
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
+ patchtypes "k8s.io/apimachinery/pkg/types"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/options"
@@ -93,7 +95,7 @@ func TestUpdateRollout(t *testing.T) {
t.Errorf("updating rollout: %v", err)
}
rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
- namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
+ namespace).Get(context.TODO(), rollout.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("getting rollout: %v", err)
@@ -111,6 +113,71 @@ func TestUpdateRollout(t *testing.T) {
}
}
+func TestPatchRollout(t *testing.T) {
+ namespace := "test-ns"
+ rollout := testutil.GetRollout(namespace, "test", map[string]string{options.RolloutStrategyAnnotation: ""})
+ err := callbacks.PatchRollout(clients, namespace, rollout, patchtypes.StrategicMergePatchType, []byte(`{"spec": {}}`))
+ assert.EqualError(t, err, "not supported patching: Rollout")
+}
+
+func TestResourceItem(t *testing.T) {
+ fixtures := newTestFixtures()
+
+ tests := []struct {
+ name string
+ createFunc func(kube.Clients, string, string) (runtime.Object, error)
+ getItemFunc func(kube.Clients, string, string) (runtime.Object, error)
+ deleteFunc func(kube.Clients, string, string) error
+ }{
+ {
+ name: "Deployment",
+ createFunc: createTestDeploymentWithAnnotations,
+ getItemFunc: callbacks.GetDeploymentItem,
+ deleteFunc: deleteTestDeployment,
+ },
+ {
+ name: "CronJob",
+ createFunc: createTestCronJobWithAnnotations,
+ getItemFunc: callbacks.GetCronJobItem,
+ deleteFunc: deleteTestCronJob,
+ },
+ {
+ name: "Job",
+ createFunc: createTestJobWithAnnotations,
+ getItemFunc: callbacks.GetJobItem,
+ deleteFunc: deleteTestJob,
+ },
+ {
+ name: "DaemonSet",
+ createFunc: createTestDaemonSetWithAnnotations,
+ getItemFunc: callbacks.GetDaemonSetItem,
+ deleteFunc: deleteTestDaemonSet,
+ },
+ {
+ name: "StatefulSet",
+ createFunc: createTestStatefulSetWithAnnotations,
+ getItemFunc: callbacks.GetStatefulSetItem,
+ deleteFunc: deleteTestStatefulSet,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ resource, err := tt.createFunc(clients, fixtures.namespace, "1")
+ assert.NoError(t, err)
+
+ accessor, err := meta.Accessor(resource)
+ assert.NoError(t, err)
+
+ _, err = tt.getItemFunc(clients, accessor.GetName(), fixtures.namespace)
+ assert.NoError(t, err)
+
+ err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
+ assert.NoError(t, err)
+ })
+ }
+}
+
func TestResourceItems(t *testing.T) {
fixtures := newTestFixtures()
@@ -118,36 +185,42 @@ func TestResourceItems(t *testing.T) {
name string
createFunc func(kube.Clients, string) error
getItemsFunc func(kube.Clients, string) []runtime.Object
+ deleteFunc func(kube.Clients, string) error
expectedCount int
}{
{
name: "Deployments",
createFunc: createTestDeployments,
getItemsFunc: callbacks.GetDeploymentItems,
+ deleteFunc: deleteTestDeployments,
expectedCount: 2,
},
{
name: "CronJobs",
createFunc: createTestCronJobs,
getItemsFunc: callbacks.GetCronJobItems,
+ deleteFunc: deleteTestCronJobs,
expectedCount: 2,
},
{
name: "Jobs",
createFunc: createTestJobs,
getItemsFunc: callbacks.GetJobItems,
+ deleteFunc: deleteTestJobs,
expectedCount: 2,
},
{
name: "DaemonSets",
createFunc: createTestDaemonSets,
getItemsFunc: callbacks.GetDaemonSetItems,
+ deleteFunc: deleteTestDaemonSets,
expectedCount: 2,
},
{
name: "StatefulSets",
createFunc: createTestStatefulSets,
getItemsFunc: callbacks.GetStatefulSetItems,
+ deleteFunc: deleteTestStatefulSets,
expectedCount: 2,
},
}
@@ -262,10 +335,11 @@ func TestUpdateResources(t *testing.T) {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
updateFunc func(kube.Clients, string, runtime.Object) error
+ deleteFunc func(kube.Clients, string, string) error
}{
- {"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
- {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
- {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
+ {"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment, deleteTestDeployment},
+ {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet, deleteTestDaemonSet},
+ {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet, deleteTestStatefulSet},
}
for _, tt := range tests {
@@ -275,6 +349,65 @@ func TestUpdateResources(t *testing.T) {
err = tt.updateFunc(clients, fixtures.namespace, resource)
assert.NoError(t, err)
+
+ accessor, err := meta.Accessor(resource)
+ assert.NoError(t, err)
+
+ err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
+ assert.NoError(t, err)
+ })
+ }
+}
+
+func TestPatchResources(t *testing.T) {
+ fixtures := newTestFixtures()
+
+ tests := []struct {
+ name string
+ createFunc func(kube.Clients, string, string) (runtime.Object, error)
+ patchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
+ deleteFunc func(kube.Clients, string, string) error
+ assertFunc func(err error)
+ }{
+ {"Deployment", createTestDeploymentWithAnnotations, callbacks.PatchDeployment, deleteTestDeployment, func(err error) {
+ assert.NoError(t, err)
+ patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace)
+ assert.NoError(t, err)
+ assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).Annotations["test"])
+ }},
+ {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) {
+ assert.NoError(t, err)
+ patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace)
+ assert.NoError(t, err)
+ assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).Annotations["test"])
+ }},
+ {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) {
+ assert.NoError(t, err)
+ patchedResource, err := callbacks.GetStatefulSetItem(clients, "test-statefulset", fixtures.namespace)
+ assert.NoError(t, err)
+ assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).Annotations["test"])
+ }},
+ {"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) {
+ assert.EqualError(t, err, "not supported patching: CronJob")
+ }},
+ {"Job", createTestJobWithAnnotations, callbacks.PatchJob, deleteTestJob, func(err error) {
+ assert.EqualError(t, err, "not supported patching: Job")
+ }},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ resource, err := tt.createFunc(clients, fixtures.namespace, "1")
+ assert.NoError(t, err)
+
+ err = tt.patchFunc(clients, fixtures.namespace, resource, patchtypes.StrategicMergePatchType, []byte(`{"metadata":{"annotations":{"test":"test"}}}`))
+ tt.assertFunc(err)
+
+ accessor, err := meta.Accessor(resource)
+ assert.NoError(t, err)
+
+ err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
+ assert.NoError(t, err)
})
}
}
@@ -282,10 +415,26 @@ func TestUpdateResources(t *testing.T) {
func TestCreateJobFromCronjob(t *testing.T) {
fixtures := newTestFixtures()
- cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
+ runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
- err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
+ cronJob := runtimeObj.(*batchv1.CronJob)
+ err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob)
+ assert.NoError(t, err)
+
+ jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{})
+ assert.NoError(t, err)
+
+ ownerFound := false
+ for _, job := range jobList.Items {
+ if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) {
+ ownerFound = true
+ break
+ }
+ }
+ assert.Truef(t, ownerFound, "Missing CronJob owner reference")
+
+ err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name)
assert.NoError(t, err)
}
@@ -297,6 +446,9 @@ func TestReCreateJobFromJob(t *testing.T) {
err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
assert.NoError(t, err)
+
+ err = deleteTestJob(clients, fixtures.namespace, "test-job")
+ assert.NoError(t, err)
}
func TestGetVolumes(t *testing.T) {
@@ -321,6 +473,24 @@ func TestGetVolumes(t *testing.T) {
}
}
+func TestGetPatchTemplateAnnotation(t *testing.T) {
+ templates := callbacks.GetPatchTemplates()
+ assert.NotEmpty(t, templates.AnnotationTemplate)
+ assert.Equal(t, 2, strings.Count(templates.AnnotationTemplate, "%s"))
+}
+
+func TestGetPatchTemplateEnvVar(t *testing.T) {
+ templates := callbacks.GetPatchTemplates()
+ assert.NotEmpty(t, templates.EnvVarTemplate)
+ assert.Equal(t, 3, strings.Count(templates.EnvVarTemplate, "%s"))
+}
+
+func TestGetPatchDeleteTemplateEnvVar(t *testing.T) {
+ templates := callbacks.GetPatchTemplates()
+ assert.NotEmpty(t, templates.DeleteEnvVarTemplate)
+ assert.Equal(t, 2, strings.Count(templates.DeleteEnvVarTemplate, "%d"))
+}
+
// Helper functions
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
@@ -330,7 +500,7 @@ func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
func watchRollout(name, namespace string) chan interface{} {
timeOut := int64(1)
modifiedChan := make(chan interface{})
- watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
+ watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), metav1.ListOptions{TimeoutSeconds: &timeOut})
go watchModified(watcher, name, modifiedChan)
return modifiedChan
}
@@ -358,6 +528,16 @@ func createTestDeployments(clients kube.Clients, namespace string) error {
return nil
}
+func deleteTestDeployments(clients kube.Clients, namespace string) error {
+ for i := 1; i <= 2; i++ {
+ err := testutil.DeleteDeployment(clients.KubernetesClient, namespace, fmt.Sprintf("test-deployment-%d", i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func createTestCronJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
@@ -368,6 +548,16 @@ func createTestCronJobs(clients kube.Clients, namespace string) error {
return nil
}
+func deleteTestCronJobs(clients kube.Clients, namespace string) error {
+ for i := 1; i <= 2; i++ {
+ err := testutil.DeleteCronJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-cron-%d", i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func createTestJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
@@ -378,6 +568,16 @@ func createTestJobs(clients kube.Clients, namespace string) error {
return nil
}
+func deleteTestJobs(clients kube.Clients, namespace string) error {
+ for i := 1; i <= 2; i++ {
+ err := testutil.DeleteJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-job-%d", i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func createTestDaemonSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
@@ -388,6 +588,16 @@ func createTestDaemonSets(clients kube.Clients, namespace string) error {
return nil
}
+func deleteTestDaemonSets(clients kube.Clients, namespace string) error {
+ for i := 1; i <= 2; i++ {
+ err := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-daemonset-%d", i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func createTestStatefulSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
@@ -398,20 +608,30 @@ func createTestStatefulSets(clients kube.Clients, namespace string) error {
return nil
}
+func deleteTestStatefulSets(clients kube.Clients, namespace string) error {
+ for i := 1; i <= 2; i++ {
+ err := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-statefulset-%d", i))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
- v.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.Template.Annotations = annotations
case *appsv1.DaemonSet:
- v.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.Template.Annotations = annotations
case *appsv1.StatefulSet:
- v.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.Template.Annotations = annotations
case *batchv1.CronJob:
- v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.JobTemplate.Spec.Template.Annotations = annotations
case *batchv1.Job:
- v.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.Template.Annotations = annotations
case *argorolloutv1alpha1.Rollout:
- v.Spec.Template.ObjectMeta.Annotations = annotations
+ v.Spec.Template.Annotations = annotations
}
return obj
}
@@ -479,6 +699,10 @@ func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, versio
return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}
+func deleteTestDeployment(clients kube.Clients, namespace, name string) error {
+ return clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
@@ -490,6 +714,10 @@ func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version
return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}
+func deleteTestDaemonSet(clients kube.Clients, namespace, name string) error {
+ return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@@ -501,6 +729,10 @@ func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, versi
return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}
+func deleteTestStatefulSet(clients kube.Clients, namespace, name string) error {
+ return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
cronJob := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
@@ -512,6 +744,10 @@ func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version s
return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}
+func deleteTestCronJob(clients kube.Clients, namespace, name string) error {
+ return clients.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
@@ -522,3 +758,16 @@ func createTestJobWithAnnotations(clients kube.Clients, namespace, version strin
}
return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
+
+func deleteTestJob(clients kube.Clients, namespace, name string) error {
+ return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
+func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool {
+ for _, ownerRef := range ownerRefs {
+ if ownerRef.Controller != nil && *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name {
+ return true
+ }
+ }
+ return false
+}
diff --git a/internal/pkg/cmd/reloader.go b/internal/pkg/cmd/reloader.go
index 03b6262..f20e0b8 100644
--- a/internal/pkg/cmd/reloader.go
+++ b/internal/pkg/cmd/reloader.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
+ _ "net/http/pprof"
"os"
"strings"
@@ -14,12 +15,12 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
)
@@ -33,30 +34,7 @@ func NewReloaderCommand() *cobra.Command {
}
// options
- cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
- cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
- cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
- cmd.PersistentFlags().StringVar(&options.SecretProviderClassUpdateOnChangeAnnotation, "secretproviderclass-annotation", "secretproviderclass.reloader.stakater.com/reload", "annotation to detect changes in secretproviderclasses, specified by name")
- cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
- cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
- cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
- cmd.PersistentFlags().StringVar(&options.SecretProviderClassReloaderAutoAnnotation, "secretproviderclass-auto-annotation", "secretproviderclass.reloader.stakater.com/auto", "annotation to detect changes in secretproviderclasses")
- cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
- cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
- cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
- cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
- cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
- cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
- cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
- cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
- cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
- cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
- cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
- cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
- cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
- cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
- cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
- cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Watch SecretProviderClassPodStatus for changes")
+ util.ConfigureReloaderFlags(cmd)
return cmd
}
@@ -125,15 +103,18 @@ func getHAEnvs() (string, string) {
}
func startReloader(cmd *cobra.Command, args []string) {
+ common.GetCommandLineOptions()
err := configureLogging(options.LogFormat, options.LogLevel)
if err != nil {
logrus.Warn(err)
}
logrus.Info("Starting Reloader")
+ isGlobal := false
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
if len(currentNamespace) == 0 {
currentNamespace = v1.NamespaceAll
+ isGlobal = true
logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.")
}
@@ -143,22 +124,22 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
- ignoredResourcesList, err := getIgnoredResourcesList(cmd)
+ ignoredResourcesList, err := util.GetIgnoredResourcesList()
if err != nil {
logrus.Fatal(err)
}
- ignoredNamespacesList, err := getIgnoredNamespacesList(cmd)
- if err != nil {
- logrus.Fatal(err)
+ ignoredNamespacesList := options.NamespacesToIgnore
+ namespaceLabelSelector := ""
+
+ if isGlobal {
+ namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors)
+ if err != nil {
+ logrus.Fatal(err)
+ }
}
- namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
- if err != nil {
- logrus.Fatal(err)
- }
-
- resourceLabelSelector, err := getResourceLabelSelector(cmd)
+ resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors)
if err != nil {
logrus.Fatal(err)
}
@@ -220,107 +201,19 @@ func startReloader(cmd *cobra.Command, args []string) {
go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
}
+ common.PublishMetaInfoConfigmap(clientset)
+
+ if options.EnablePProf {
+ go startPProfServer()
+ }
+
leadership.SetupLivenessEndpoint()
logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}
-func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
- return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
-}
-
-func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
- slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
- if err != nil {
- logrus.Fatal(err)
- }
-
- for i, kv := range slice {
- // Legacy support for ":" as a delimiter and "*" for wildcard.
- if strings.Contains(kv, ":") {
- split := strings.Split(kv, ":")
- if split[1] == "*" {
- slice[i] = split[0]
- } else {
- slice[i] = split[0] + "=" + split[1]
- }
- }
- // Convert wildcard to valid apimachinery operator
- if strings.Contains(kv, "=") {
- split := strings.Split(kv, "=")
- if split[1] == "*" {
- slice[i] = split[0]
- }
- }
- }
-
- namespaceLabelSelector := strings.Join(slice[:], ",")
- _, err = labels.Parse(namespaceLabelSelector)
- if err != nil {
- logrus.Fatal(err)
- }
-
- return namespaceLabelSelector, nil
-}
-
-func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
- slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
- if err != nil {
- logrus.Fatal(err)
- }
-
- for i, kv := range slice {
- // Legacy support for ":" as a delimiter and "*" for wildcard.
- if strings.Contains(kv, ":") {
- split := strings.Split(kv, ":")
- if split[1] == "*" {
- slice[i] = split[0]
- } else {
- slice[i] = split[0] + "=" + split[1]
- }
- }
- // Convert wildcard to valid apimachinery operator
- if strings.Contains(kv, "=") {
- split := strings.Split(kv, "=")
- if split[1] == "*" {
- slice[i] = split[0]
- }
- }
- }
-
- resourceLabelSelector := strings.Join(slice[:], ",")
- _, err = labels.Parse(resourceLabelSelector)
- if err != nil {
- logrus.Fatal(err)
- }
-
- return resourceLabelSelector, nil
-}
-
-func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
- slice, err := cmd.Flags().GetStringSlice(flag)
- if err != nil {
- return nil, err
- }
-
- return slice, nil
-}
-
-func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {
-
- ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
- if err != nil {
- return nil, err
- }
-
- for _, v := range ignoredResourcesList {
- if v != "configMaps" && v != "secrets" {
- return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
- }
- }
-
- if len(ignoredResourcesList) > 1 {
- return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
- }
-
- return ignoredResourcesList, nil
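+
+// startPProfServer serves http.DefaultServeMux, including the pprof handlers registered by the blank net/http/pprof import.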
+func startPProfServer() {
+ logrus.Infof("Starting pprof server on %s", options.PProfAddr)
+ if err := http.ListenAndServe(options.PProfAddr, nil); err != nil {
+ logrus.Errorf("Failed to start pprof server: %v", err)
+ }
}
diff --git a/internal/pkg/controller/controller.go b/internal/pkg/controller/controller.go
index dca6625..f2a0143 100644
--- a/internal/pkg/controller/controller.go
+++ b/internal/pkg/controller/controller.go
@@ -134,13 +134,13 @@ func (c *Controller) Add(obj interface{}) {
}
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
- switch object := raw.(type) {
+ switch obj := raw.(type) {
case *v1.ConfigMap:
- return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
+ return c.ignoredNamespaces.Contains(obj.Namespace)
case *v1.Secret:
- return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
+ return c.ignoredNamespaces.Contains(obj.Namespace)
case *csiv1.SecretProviderClassPodStatus:
- return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
+ return c.ignoredNamespaces.Contains(obj.Namespace)
}
return false
}
@@ -231,7 +231,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
// Wait for all involved caches to be synced, before processing items from the queue is started
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
- runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
+ runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
@@ -245,9 +245,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
func (c *Controller) runWorker() {
// At this point the controller is fully initialized and we can start processing the resources
- if c.resource == "secrets" {
+ if c.resource == string(v1.ResourceSecrets) {
secretControllerInitialized = true
- } else if c.resource == "configMaps" {
+ } else if c.resource == string(v1.ResourceConfigMaps) {
configmapControllerInitialized = true
}
diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go
index f599923..0399933 100644
--- a/internal/pkg/controller/controller_test.go
+++ b/internal/pkg/controller/controller_test.go
@@ -15,6 +15,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -73,64 +74,6 @@ func TestMain(m *testing.M) {
os.Exit(retCode)
}
-// Perform rolling upgrade on deploymentConfig and create pod annotation var upon updating the configmap
-func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeploymentConfig(t *testing.T) {
- options.ReloadStrategy = constants.AnnotationsReloadStrategy
-
- // Don't run test on non-openshift environment
- if !kube.IsOpenshift {
- return
- }
-
- // Creating configmap
- configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
- configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
- if err != nil {
- t.Errorf("Error while creating the configmap %v", err)
- }
-
- // Creating deployment
- _, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
- if err != nil {
- t.Errorf("Error in deploymentConfig creation: %v", err)
- }
-
- // Updating configmap for first time
- updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
- if updateErr != nil {
- t.Errorf("Configmap was not updated")
- }
-
- // Verifying deployment update
- logrus.Infof("Verifying pod annotation has been created")
- shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
- Namespace: namespace,
- ResourceName: configmapName,
- SHAValue: shaData,
- Annotation: options.ConfigmapUpdateOnChangeAnnotation,
- }
- deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
- updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentConfigFuncs)
- if !updated {
- t.Errorf("DeploymentConfig was not updated")
- }
- time.Sleep(sleepDuration)
-
- // Deleting deployment
- err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
- if err != nil {
- logrus.Errorf("Error while deleting the deploymentConfig %v", err)
- }
-
- // Deleting configmap
- err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
- if err != nil {
- logrus.Errorf("Error while deleting the configmap %v", err)
- }
- time.Sleep(sleepDuration)
-}
-
// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
@@ -157,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -210,7 +153,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -275,7 +218,7 @@ func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -334,7 +277,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -389,7 +332,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationIn
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -453,7 +396,7 @@ func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -506,7 +449,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -564,7 +507,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testin
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -615,7 +558,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -677,7 +620,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldCreatePodAnnotation
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -752,7 +695,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdatePodAnnotation
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -820,7 +763,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCrea
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -878,7 +821,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *test
// Verifying DaemonSet update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -941,7 +884,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.
// Verifying DaemonSet update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -994,7 +937,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1053,7 +996,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1104,7 +1047,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDae
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1156,7 +1099,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *te
// Verifying StatefulSet update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1215,7 +1158,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testin
// Verifying StatefulSet update
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1268,7 +1211,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1294,64 +1237,6 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
time.Sleep(sleepDuration)
}
-// Perform rolling upgrade on deploymentConfig and create env var upon updating the configmap
-func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing.T) {
- options.ReloadStrategy = constants.EnvVarsReloadStrategy
-
- // Don't run test on non-openshift environment
- if !kube.IsOpenshift {
- return
- }
-
- // Creating configmap
- configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
- configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
- if err != nil {
- t.Errorf("Error while creating the configmap %v", err)
- }
-
- // Creating deployment
- _, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
- if err != nil {
- t.Errorf("Error in deploymentConfig creation: %v", err)
- }
-
- // Updating configmap for first time
- updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
- if updateErr != nil {
- t.Errorf("Configmap was not updated")
- }
-
- // Verifying deployment update
- logrus.Infof("Verifying env var has been created")
- shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
- Namespace: namespace,
- ResourceName: configmapName,
- SHAValue: shaData,
- Annotation: options.ConfigmapUpdateOnChangeAnnotation,
- }
- deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
- updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentConfigFuncs)
- if !updated {
- t.Errorf("DeploymentConfig was not updated")
- }
- time.Sleep(sleepDuration)
-
- // Deleting deployment
- err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
- if err != nil {
- logrus.Errorf("Error while deleting the deploymentConfig %v", err)
- }
-
- // Deleting configmap
- err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
- if err != nil {
- logrus.Errorf("Error while deleting the configmap %v", err)
- }
- time.Sleep(sleepDuration)
-}
-
// Perform rolling upgrade on deployment and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
@@ -1378,7 +1263,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1431,7 +1316,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1496,7 +1381,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1555,7 +1440,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing
// Verifying deployment update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1610,7 +1495,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -1674,7 +1559,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1727,7 +1612,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1785,7 +1670,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1836,7 +1721,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -1898,7 +1783,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldCreateEnvInDeployme
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -1972,7 +1857,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdateEnvInDeployme
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -2039,7 +1924,7 @@ func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUp
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretproviderclasspodstatusName,
SHAValue: shaData,
@@ -2097,7 +1982,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -2160,7 +2045,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.
// Verifying DaemonSet update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -2213,7 +2098,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2272,7 +2157,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2323,7 +2208,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2375,7 +2260,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
// Verifying StatefulSet update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -2434,7 +2319,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testin
// Verifying StatefulSet update
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -2487,7 +2372,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2545,7 +2430,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
// Verifying Upgrade
logrus.Infof("Verifying env var has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2603,7 +2488,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testi
// Verifying Upgrade
logrus.Infof("Verifying pod annotation has been updated")
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
- config := util.Config{
+ config := common.Config{
Namespace: namespace,
ResourceName: secretName,
SHAValue: shaData,
@@ -2881,7 +2766,7 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
indexer: tt.fields.indexer,
queue: tt.fields.queue,
informer: tt.fields.informer,
- namespace: tt.fields.namespace.ObjectMeta.Name,
+ namespace: tt.fields.namespace.Name,
namespaceSelector: tt.fields.namespaceSelector,
}
diff --git a/internal/pkg/handler/create.go b/internal/pkg/handler/create.go
index a35da5e..fab7378 100644
--- a/internal/pkg/handler/create.go
+++ b/internal/pkg/handler/create.go
@@ -4,7 +4,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
- "github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
)
@@ -33,13 +33,13 @@ func (r ResourceCreatedHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
+func (r ResourceCreatedHandler) GetConfig() (common.Config, string) {
var oldSHAData string
- var config util.Config
+ var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
- config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+ config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
- config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+ config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
diff --git a/internal/pkg/handler/delete.go b/internal/pkg/handler/delete.go
index 2378d0f..65c671e 100644
--- a/internal/pkg/handler/delete.go
+++ b/internal/pkg/handler/delete.go
@@ -1,15 +1,20 @@
package handler
import (
+ "fmt"
+ "slices"
+
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
- "github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
+
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
+ patchtypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
)
@@ -37,20 +42,20 @@ func (r ResourceDeleteHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
+func (r ResourceDeleteHandler) GetConfig() (common.Config, string) {
var oldSHAData string
- var config util.Config
+ var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
- config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+ config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
- config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+ config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
return config, oldSHAData
}
-func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return removePodAnnotations(upgradeFuncs, item, config, autoReload)
}
@@ -58,35 +63,38 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
-func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
config.SHAValue = testutil.GetSHAfromEmptyData()
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
-func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
- return constants.NoContainerFound
+ return InvokeStrategyResult{constants.NoContainerFound, nil}
}
//remove if env var exists
- containers := upgradeFuncs.ContainersFunc(item)
- for i := range containers {
- envs := containers[i].Env
- index := -1
- for j := range envs {
- if envs[j].Name == envVar {
- index = j
- break
- }
- }
+ if len(container.Env) > 0 {
+ index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool {
+ return envVariable.Name == envVar
+ })
if index != -1 {
- containers[i].Env = append(containers[i].Env[:index], containers[i].Env[index+1:]...)
- return constants.Updated
+ var patch []byte
+ if upgradeFuncs.SupportsPatch {
+ containers := upgradeFuncs.ContainersFunc(item)
+ containerIndex := slices.IndexFunc(containers, func(c v1.Container) bool {
+ return c.Name == container.Name
+ })
+ patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate, containerIndex, index)
+ }
+
+ container.Env = append(container.Env[:index], container.Env[index+1:]...)
+ return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.JSONPatchType, Bytes: patch}}
}
}
- return constants.NotUpdated
+ return InvokeStrategyResult{constants.NotUpdated, nil}
}
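
For workloads with SupportsPatch set, removeContainerEnvVars now emits a JSON Patch built from a printf-style template indexed by container and env-var position. The real template comes from callbacks.GetPatchTemplates, which this diff does not show; the sketch below assumes a plausible remove-op template purely for illustration:

    package main

    import "fmt"

    func main() {
        // Hypothetical template; the actual string lives in callbacks.GetPatchTemplates.
        const deleteEnvVarTemplate = `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`
        containerIndex, envIndex := 0, 2
        patch := fmt.Appendf(nil, deleteEnvVarTemplate, containerIndex, envIndex)
        fmt.Println(string(patch))
        // [{"op":"remove","path":"/spec/template/spec/containers/0/env/2"}]
    }
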
diff --git a/internal/pkg/handler/handler.go b/internal/pkg/handler/handler.go
index 634e080..1f5858e 100644
--- a/internal/pkg/handler/handler.go
+++ b/internal/pkg/handler/handler.go
@@ -1,11 +1,9 @@
package handler
-import (
- "github.com/stakater/Reloader/internal/pkg/util"
-)
+import "github.com/stakater/Reloader/pkg/common"
// ResourceHandler handles the creation and update of resources
type ResourceHandler interface {
Handle() error
- GetConfig() (util.Config, string)
+ GetConfig() (common.Config, string)
}
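
With the interface now returning common.Config, any conforming handler needs only these two methods. An illustrative no-op implementation (the type name is invented for the sketch):

    package handler

    import "github.com/stakater/Reloader/pkg/common"

    // noopHandler is a hypothetical ResourceHandler that does nothing.
    type noopHandler struct{}

    func (noopHandler) Handle() error { return nil }

    func (noopHandler) GetConfig() (common.Config, string) {
        return common.Config{}, ""
    }

    // Compile-time assertion that noopHandler satisfies ResourceHandler.
    var _ ResourceHandler = noopHandler{}
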
diff --git a/internal/pkg/handler/pause_deployment.go b/internal/pkg/handler/pause_deployment.go
new file mode 100644
index 0000000..28d1b9e
--- /dev/null
+++ b/internal/pkg/handler/pause_deployment.go
@@ -0,0 +1,242 @@
+package handler
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "github.com/stakater/Reloader/internal/pkg/options"
+ "github.com/stakater/Reloader/pkg/kube"
+ app "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ patchtypes "k8s.io/apimachinery/pkg/types"
+)
+
+// Keeps track of currently active timers
+var activeTimers = make(map[string]*time.Timer)
+
+// Returns a unique key for the activeTimers map
+func getTimerKey(namespace, deploymentName string) string {
+ return fmt.Sprintf("%s/%s", namespace, deploymentName)
+}
+
+// IsPaused reports whether a deployment is currently paused
+func IsPaused(deployment *app.Deployment) bool {
+ return deployment.Spec.Paused
+}
+
+// IsPausedByReloader reports whether the deployment was paused by Reloader
+func IsPausedByReloader(deployment *app.Deployment) bool {
+ if IsPaused(deployment) {
+ pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
+ return pausedAtAnnotationValue != ""
+ }
+ return false
+}
+
+// GetPauseStartTime returns the time at which the deployment was paused by Reloader, or nil if it was not paused by Reloader
+func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) {
+ if !IsPausedByReloader(deployment) {
+ return nil, nil
+ }
+
+ pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
+ parsedTime, err := time.Parse(time.RFC3339, pausedAtStr)
+ if err != nil {
+ return nil, err
+ }
+
+ return &parsedTime, nil
+}
+
+// ParsePauseDuration parses the pause interval value and returns a time.Duration
+func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) {
+ pauseDuration, err := time.ParseDuration(pauseIntervalValue)
+ if err != nil {
+ logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err)
+ return 0, err
+ }
+ return pauseDuration, nil
+}
+
+// PauseDeployment pauses a deployment for the specified duration and creates
+// a timer that resumes it once that duration elapses
+func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) {
+ deploymentName := deployment.Name
+ pauseDuration, err := ParsePauseDuration(pauseIntervalValue)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if !IsPaused(deployment) {
+ logrus.Infof("Pausing Deployment '%s' in namespace '%s' for %s", deploymentName, namespace, pauseDuration)
+
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+
+ pausePatch, err := CreatePausePatch()
+ if err != nil {
+ logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err)
+ return deployment, err
+ }
+
+ err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch)
+
+ if err != nil {
+ logrus.Errorf("Failed to patch deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
+ return deployment, err
+ }
+
+ updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+
+ CreateResumeTimer(deployment, clients, namespace, pauseDuration)
+ return updatedDeployment, err
+ }
+
+ if !IsPausedByReloader(deployment) {
+ logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace)
+ return deployment, nil
+ }
+
+ // Deployment has already been paused by reloader, check for timer
+ logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace)
+
+ timerKey := getTimerKey(namespace, deploymentName)
+ _, timerExists := activeTimers[timerKey]
+
+ if !timerExists {
+ logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one",
+ deploymentName, namespace)
+ HandleMissingTimer(deployment, pauseDuration, clients, namespace)
+ }
+ return deployment, nil
+}
+
+// HandleMissingTimer handles missing timers for deployments that were paused by Reloader.
+// This can occur after a new leader election or a Reloader restart
+func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) {
+ deploymentName := deployment.Name
+ pauseStartTime, err := GetPauseStartTime(deployment)
+ if err != nil {
+ logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. Resuming deployment immediately",
+ deploymentName, namespace, err)
+ ResumeDeployment(deployment, namespace, clients)
+ return
+ }
+
+ if pauseStartTime == nil {
+ return
+ }
+
+ elapsedPauseTime := time.Since(*pauseStartTime)
+ remainingPauseTime := pauseDuration - elapsedPauseTime
+
+ if remainingPauseTime <= 0 {
+ logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. Resuming immediately",
+ deploymentName, namespace)
+ ResumeDeployment(deployment, namespace, clients)
+ return
+ }
+
+ logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s",
+ deploymentName, namespace, remainingPauseTime)
+ CreateResumeTimer(deployment, clients, namespace, remainingPauseTime)
+}
+
+// CreateResumeTimer creates a timer to resume the deployment after the specified duration
+func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) {
+ deploymentName := deployment.Name
+ timerKey := getTimerKey(namespace, deployment.Name)
+
+ // Check if there's an existing timer for this deployment
+ if _, exists := activeTimers[timerKey]; exists {
+ logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', Skipping creation",
+ deploymentName, namespace)
+ return
+ }
+
+ // Create and store the new timer
+ timer := time.AfterFunc(pauseDuration, func() {
+ ResumeDeployment(deployment, namespace, clients)
+ })
+
+ // Add the new timer to the map
+ activeTimers[timerKey] = timer
+
+ logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s",
+ deploymentName, namespace, pauseDuration)
+}
+
+// ResumeDeployment resumes a deployment that has been paused by reloader
+func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) {
+ deploymentName := deployment.Name
+
+ currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+
+ if err != nil {
+ logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
+ return
+ }
+
+ if !IsPausedByReloader(currentDeployment) {
+ logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. Skipping resume", deploymentName, namespace)
+ return
+ }
+
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+
+ resumePatch, err := CreateResumePatch()
+ if err != nil {
+ logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err)
+ return
+ }
+
+ // Remove the timer
+ timerKey := getTimerKey(namespace, deploymentName)
+ if timer, exists := activeTimers[timerKey]; exists {
+ timer.Stop()
+ delete(activeTimers, timerKey)
+ logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace)
+ }
+
+ err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch)
+
+ if err != nil {
+ logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
+ return
+ }
+
+ logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace)
+}
+
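+// CreatePausePatch builds a strategic merge patch that pauses the deployment and records the pause time in an annotation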
+func CreatePausePatch() ([]byte, error) {
+ patchData := map[string]interface{}{
+ "spec": map[string]interface{}{
+ "paused": true,
+ },
+ "metadata": map[string]interface{}{
+ "annotations": map[string]string{
+ options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
+ },
+ },
+ }
+
+ return json.Marshal(patchData)
+}
+
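+// CreateResumePatch builds a strategic merge patch that resumes the deployment and clears the pause-time annotation (a null value deletes the key)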
+func CreateResumePatch() ([]byte, error) {
+ patchData := map[string]interface{}{
+ "spec": map[string]interface{}{
+ "paused": false,
+ },
+ "metadata": map[string]interface{}{
+ "annotations": map[string]interface{}{
+ options.PauseDeploymentTimeAnnotation: nil,
+ },
+ },
+ }
+
+ return json.Marshal(patchData)
+}
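
The resume path depends on strategic-merge-patch semantics: marshalling the annotation value as null tells the API server to delete that key, which is how the pause timestamp is cleared. A standalone sketch of the payload (the annotation key is a placeholder, not necessarily the value of options.PauseDeploymentTimeAnnotation):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Placeholder key for illustration only.
        const pausedAt = "example.com/paused-at"
        resume := map[string]interface{}{
            "spec": map[string]interface{}{"paused": false},
            "metadata": map[string]interface{}{
                // null value => the strategic merge patch removes the annotation
                "annotations": map[string]interface{}{pausedAt: nil},
            },
        }
        b, _ := json.Marshal(resume)
        fmt.Println(string(b))
        // {"metadata":{"annotations":{"example.com/paused-at":null}},"spec":{"paused":false}}
    }
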
diff --git a/internal/pkg/handler/pause_deployment_test.go b/internal/pkg/handler/pause_deployment_test.go
new file mode 100644
index 0000000..c14cbfc
--- /dev/null
+++ b/internal/pkg/handler/pause_deployment_test.go
@@ -0,0 +1,391 @@
+package handler
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stakater/Reloader/internal/pkg/options"
+ "github.com/stakater/Reloader/pkg/kube"
+ "github.com/stretchr/testify/assert"
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ testclient "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestIsPaused(t *testing.T) {
+ tests := []struct {
+ name string
+ deployment *appsv1.Deployment
+ paused bool
+ }{
+ {
+ name: "paused deployment",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ },
+ paused: true,
+ },
+ {
+ name: "unpaused deployment",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: false,
+ },
+ },
+ paused: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ result := IsPaused(test.deployment)
+ assert.Equal(t, test.paused, result)
+ })
+ }
+}
+
+func TestIsPausedByReloader(t *testing.T) {
+ tests := []struct {
+ name string
+ deployment *appsv1.Deployment
+ pausedByReloader bool
+ }{
+ {
+ name: "paused by reloader",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
+ },
+ },
+ },
+ pausedByReloader: true,
+ },
+ {
+ name: "not paused by reloader",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{},
+ },
+ },
+ pausedByReloader: false,
+ },
+ {
+ name: "not paused",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: false,
+ },
+ },
+ pausedByReloader: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ pausedByReloader := IsPausedByReloader(test.deployment)
+ assert.Equal(t, test.pausedByReloader, pausedByReloader)
+ })
+ }
+}
+
+func TestGetPauseStartTime(t *testing.T) {
+ now := time.Now()
+ nowStr := now.Format(time.RFC3339)
+
+ tests := []struct {
+ name string
+ deployment *appsv1.Deployment
+ pausedByReloader bool
+ expectedStartTime time.Time
+ }{
+ {
+ name: "valid pause time",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ options.PauseDeploymentTimeAnnotation: nowStr,
+ },
+ },
+ },
+ pausedByReloader: true,
+ expectedStartTime: now,
+ },
+ {
+ name: "not paused by reloader",
+ deployment: &appsv1.Deployment{
+ Spec: appsv1.DeploymentSpec{
+ Paused: false,
+ },
+ },
+ pausedByReloader: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ actualStartTime, err := GetPauseStartTime(test.deployment)
+
+ assert.NoError(t, err)
+
+ if !test.pausedByReloader {
+ assert.Nil(t, actualStartTime)
+ } else {
+ assert.NotNil(t, actualStartTime)
+ assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second)
+ }
+ })
+ }
+}
+
+func TestParsePauseDuration(t *testing.T) {
+ tests := []struct {
+ name string
+ pauseIntervalValue string
+ expectedDuration time.Duration
+ invalidDuration bool
+ }{
+ {
+ name: "valid duration",
+ pauseIntervalValue: "10s",
+ expectedDuration: 10 * time.Second,
+ invalidDuration: false,
+ },
+ {
+ name: "valid minute duration",
+ pauseIntervalValue: "2m",
+ expectedDuration: 2 * time.Minute,
+ invalidDuration: false,
+ },
+ {
+ name: "invalid duration",
+ pauseIntervalValue: "invalid",
+ expectedDuration: 0,
+ invalidDuration: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ actualDuration, err := ParsePauseDuration(test.pauseIntervalValue)
+
+ if test.invalidDuration {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.expectedDuration, actualDuration)
+ }
+ })
+ }
+}
+
+func TestHandleMissingTimerSimple(t *testing.T) {
+ tests := []struct {
+ name string
+ deployment *appsv1.Deployment
+ shouldBePaused bool // Expected paused state after HandleMissingTimer runs
+ }{
+ {
+ name: "deployment paused by reloader, pause period has expired and no timer",
+ deployment: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-deployment-1",
+ Annotations: map[string]string{
+ options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339),
+ options.PauseDeploymentAnnotation: "5m",
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ },
+ shouldBePaused: false,
+ },
+ {
+ name: "deployment paused by reloader, pause period expires in the future and no timer",
+ deployment: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-deployment-2",
+ Annotations: map[string]string{
+ options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339),
+ options.PauseDeploymentAnnotation: "5m",
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ },
+ shouldBePaused: true,
+ },
+ }
+
+ for _, test := range tests {
+ // Stop and remove any leftover timers when the test function returns (defers registered in the loop run at function exit, not per iteration)
+ defer func() {
+ for key, timer := range activeTimers {
+ timer.Stop()
+ delete(activeTimers, key)
+ }
+ }()
+
+ t.Run(test.name, func(t *testing.T) {
+ fakeClient := testclient.NewSimpleClientset()
+ clients := kube.Clients{
+ KubernetesClient: fakeClient,
+ }
+
+ _, err := fakeClient.AppsV1().Deployments("default").Create(
+ context.TODO(),
+ test.deployment,
+ metav1.CreateOptions{})
+ assert.NoError(t, err, "Expected no error when creating deployment")
+
+ pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation])
+ HandleMissingTimer(test.deployment, pauseDuration, clients, "default")
+
+ updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{})
+
+ assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused,
+ "Deployment should have correct paused state after timer expiration")
+
+ if test.shouldBePaused {
+ pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
+ assert.NotEmpty(t, pausedAtAnnotationValue,
+ "Pause annotation should be present and contain a value when deployment is paused")
+ }
+ })
+ }
+}
+
+func TestPauseDeployment(t *testing.T) {
+ tests := []struct {
+ name string
+ deployment *appsv1.Deployment
+ expectedError bool
+ expectedPaused bool
+ expectedAnnotation bool // Should have pause time annotation
+ pauseInterval string
+ }{
+ {
+ name: "deployment without pause annotation",
+ deployment: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-deployment",
+ Annotations: map[string]string{},
+ },
+ Spec: appsv1.DeploymentSpec{
+ Paused: false,
+ },
+ },
+ expectedError: true,
+ expectedPaused: false,
+ expectedAnnotation: false,
+ pauseInterval: "",
+ },
+ {
+ name: "deployment already paused but not by reloader",
+ deployment: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-deployment",
+ Annotations: map[string]string{
+ options.PauseDeploymentAnnotation: "5m",
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Paused: true,
+ },
+ },
+ expectedError: false,
+ expectedPaused: true,
+ expectedAnnotation: false,
+ pauseInterval: "5m",
+ },
+ {
+ name: "deployment unpaused that needs to be paused by reloader",
+ deployment: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-deployment-3",
+ Annotations: map[string]string{
+ options.PauseDeploymentAnnotation: "5m",
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Paused: false,
+ },
+ },
+ expectedError: false,
+ expectedPaused: true,
+ expectedAnnotation: true,
+ pauseInterval: "5m",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ fakeClient := testclient.NewSimpleClientset()
+ clients := kube.Clients{
+ KubernetesClient: fakeClient,
+ }
+
+ _, err := fakeClient.AppsV1().Deployments("default").Create(
+ context.TODO(),
+ test.deployment,
+ metav1.CreateOptions{})
+ assert.NoError(t, err, "Expected no error when creating deployment")
+
+ updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval)
+ if test.expectedError {
+ assert.Error(t, err, "Expected an error pausing the deployment")
+ return
+ } else {
+ assert.NoError(t, err, "Expected no error pausing the deployment")
+ }
+
+ assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused,
+ "Deployment should have correct paused state after pause")
+
+ if test.expectedAnnotation {
+ pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
+ assert.NotEmpty(t, pausedAtAnnotationValue,
+ "Pause annotation should be present and contain a value when deployment is paused")
+ } else {
+ pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
+ assert.Empty(t, pausedAtAnnotationValue,
+ "Pause annotation should not be present when deployment has not been paused by reloader")
+ }
+ })
+ }
+}
+
+// FindDeploymentByName is a test helper that finds a deployment by name in a slice of runtime objects
+func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*appsv1.Deployment, error) {
+ for _, deployment := range deployments {
+ accessor, err := meta.Accessor(deployment)
+ if err != nil {
+ return nil, fmt.Errorf("error getting accessor for item: %v", err)
+ }
+ if accessor.GetName() == deploymentName {
+ deploymentObj, ok := deployment.(*appsv1.Deployment)
+ if !ok {
+ return nil, fmt.Errorf("failed to cast to Deployment")
+ }
+ return deploymentObj, nil
+ }
+ }
+ return nil, fmt.Errorf("deployment '%s' not found", deploymentName)
+}
diff --git a/internal/pkg/handler/update.go b/internal/pkg/handler/update.go
index 6a0baac..262399d 100644
--- a/internal/pkg/handler/update.go
+++ b/internal/pkg/handler/update.go
@@ -5,6 +5,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
@@ -37,18 +38,18 @@ func (r ResourceUpdatedHandler) Handle() error {
}
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
+func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
var oldSHAData string
- var config util.Config
+ var config common.Config
if _, ok := r.Resource.(*v1.ConfigMap); ok {
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
- config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+ config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
} else if _, ok := r.Resource.(*v1.Secret); ok {
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
- config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+ config = common.GetSecretConfig(r.Resource.(*v1.Secret))
} else if _, ok := r.Resource.(*csiv1.SecretProviderClassPodStatus); ok {
oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(r.OldResource.(*csiv1.SecretProviderClassPodStatus).Status)
- config = util.GetSecretProviderClassPodStatusConfig(r.Resource.(*csiv1.SecretProviderClassPodStatus))
+ config = common.GetSecretProviderClassPodStatusConfig(r.Resource.(*csiv1.SecretProviderClassPodStatus))
} else {
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
}
diff --git a/internal/pkg/handler/upgrade.go b/internal/pkg/handler/upgrade.go
index 4542455..f5b7ead 100644
--- a/internal/pkg/handler/upgrade.go
+++ b/internal/pkg/handler/upgrade.go
@@ -8,8 +8,6 @@ import (
"fmt"
"io"
"os"
- "regexp"
- "strconv"
"strings"
"github.com/parnurzeal/gorequest"
@@ -21,113 +19,129 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
+ app "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ patchtypes "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
)
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetDeploymentItem,
ItemsFunc: callbacks.GetDeploymentItems,
AnnotationsFunc: callbacks.GetDeploymentAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
ContainersFunc: callbacks.GetDeploymentContainers,
InitContainersFunc: callbacks.GetDeploymentInitContainers,
UpdateFunc: callbacks.UpdateDeployment,
+ PatchFunc: callbacks.PatchDeployment,
+ PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetDeploymentVolumes,
ResourceType: "Deployment",
+ SupportsPatch: true,
}
}
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob
func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetCronJobItem,
ItemsFunc: callbacks.GetCronJobItems,
AnnotationsFunc: callbacks.GetCronJobAnnotations,
PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations,
ContainersFunc: callbacks.GetCronJobContainers,
InitContainersFunc: callbacks.GetCronJobInitContainers,
UpdateFunc: callbacks.CreateJobFromCronjob,
+ PatchFunc: callbacks.PatchCronJob,
+ PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetCronJobVolumes,
ResourceType: "CronJob",
+ SupportsPatch: false,
}
}
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetJobItem,
ItemsFunc: callbacks.GetJobItems,
AnnotationsFunc: callbacks.GetJobAnnotations,
PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
ContainersFunc: callbacks.GetJobContainers,
InitContainersFunc: callbacks.GetJobInitContainers,
UpdateFunc: callbacks.ReCreateJobFromjob,
+ PatchFunc: callbacks.PatchJob,
+ PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetJobVolumes,
ResourceType: "Job",
+ SupportsPatch: false,
}
}
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetDaemonSetItem,
ItemsFunc: callbacks.GetDaemonSetItems,
AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
ContainersFunc: callbacks.GetDaemonSetContainers,
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
+ PatchFunc: callbacks.PatchDaemonSet,
+ PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetDaemonSetVolumes,
ResourceType: "DaemonSet",
+ SupportsPatch: true,
}
}
// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetStatefulSetItem,
ItemsFunc: callbacks.GetStatefulSetItems,
AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
ContainersFunc: callbacks.GetStatefulSetContainers,
InitContainersFunc: callbacks.GetStatefulSetInitContainers,
UpdateFunc: callbacks.UpdateStatefulSet,
+ PatchFunc: callbacks.PatchStatefulSet,
+ PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetStatefulSetVolumes,
ResourceType: "StatefulSet",
- }
-}
-
-// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
-func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
- return callbacks.RollingUpgradeFuncs{
- ItemsFunc: callbacks.GetDeploymentConfigItems,
- AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
- PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
- ContainersFunc: callbacks.GetDeploymentConfigContainers,
- InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
- UpdateFunc: callbacks.UpdateDeploymentConfig,
- VolumesFunc: callbacks.GetDeploymentConfigVolumes,
- ResourceType: "DeploymentConfig",
+ SupportsPatch: true,
}
}
// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
+ ItemFunc: callbacks.GetRolloutItem,
ItemsFunc: callbacks.GetRolloutItems,
AnnotationsFunc: callbacks.GetRolloutAnnotations,
PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
ContainersFunc: callbacks.GetRolloutContainers,
InitContainersFunc: callbacks.GetRolloutInitContainers,
UpdateFunc: callbacks.UpdateRollout,
+ PatchFunc: callbacks.PatchRollout,
+ PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetRolloutVolumes,
ResourceType: "Rollout",
+ SupportsPatch: false,
}
}
-func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
+func sendUpgradeWebhook(config common.Config, webhookUrl string) error {
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
config.ResourceName, config.Type, config.Namespace, webhookUrl)
@@ -149,7 +163,12 @@ func sendWebhook(url string) (string, []error) {
// the reloader seems to retry automatically so no retry logic added
return "", err
}
- defer resp.Body.Close()
+ defer func() {
+ closeErr := resp.Body.Close()
+ if closeErr != nil {
+ logrus.Error(closeErr)
+ }
+ }()
var buffer bytes.Buffer
_, bufferErr := io.Copy(&buffer, resp.Body)
if bufferErr != nil {
@@ -158,21 +177,37 @@ func sendWebhook(url string) (string, []error) {
return buffer.String(), nil
}
-func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
+func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
clients := kube.GetClients()
- err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
+ // Get ignored workload types to avoid listing resources without RBAC permissions
+ ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
+ if err != nil {
+ logrus.Errorf("Failed to parse ignored workload types: %v", err)
+ ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails
+ }
+
+ err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
- err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
- if err != nil {
- return err
+
+ // Only process CronJobs if they are not ignored
+ if !ignoredWorkloadTypes.Contains("cronjobs") {
+ err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
+ if err != nil {
+ return err
+ }
}
- err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
- if err != nil {
- return err
+
+ // Only process Jobs if they are not ignored
+ if !ignoredWorkloadTypes.Contains("jobs") {
+ err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
+ if err != nil {
+ return err
+ }
}
+
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
@@ -182,13 +217,6 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
return err
}
- if kube.IsOpenshift {
- err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder, invoke)
- if err != nil {
- return err
- }
- }
-
if options.IsArgoRollouts == "true" {
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
@@ -199,8 +227,7 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
return nil
}
-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
-
+func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
@@ -209,140 +236,134 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
}
// PerformAction invokes the deployment if there is any change in configmap or secret data
-func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
+func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
+ for _, item := range items {
+ err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error {
+ return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource)
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func retryOnConflict(backoff wait.Backoff, fn func(fetchResource bool) error) error {
+ var lastError error
+ fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc
+ err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+ err := fn(fetchResource)
+ fetchResource = true
+ switch {
+ case err == nil:
+ return true, nil
+ case apierrors.IsConflict(err):
+ lastError = err
+ return false, nil
+ default:
+ return false, err
+ }
+ })
+ if wait.Interrupted(err) {
+ err = lastError
+ }
+ return err
+}
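`retryOnConflict` is a variant of client-go's `retry.RetryOnConflict` with one twist: the callback is told whether to re-fetch the resource, so the object already returned by `ItemsFunc` is reused on the first attempt and only a 409 Conflict forces a fresh `ItemFunc` read. A self-contained sketch of the same loop:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

// retrySketch condenses the retryOnConflict helper above: retry only on
// conflicts, and re-fetch on every attempt but the first.
func retrySketch(fn func(fetchResource bool) error) error {
	fetch := false
	return wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
		err := fn(fetch)
		fetch = true
		switch {
		case err == nil:
			return true, nil
		case apierrors.IsConflict(err):
			return false, nil // back off and try again
		default:
			return false, err
		}
	})
}

func main() {
	attempts := 0
	err := retrySketch(func(fetchResource bool) error {
		attempts++
		if attempts < 3 {
			// Simulate the API server rejecting a stale update.
			return apierrors.NewConflict(
				schema.GroupResource{Group: "apps", Resource: "deployments"},
				"example", fmt.Errorf("object was modified"))
		}
		fmt.Printf("attempt %d succeeded (re-fetched: %v)\n", attempts, fetchResource)
		return nil
	})
	fmt.Println("final error:", err)
}
```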
+
+func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
+ accessor, err := meta.Accessor(resource)
+ if err != nil {
+ return err
+ }
+
+ resourceName := accessor.GetName()
+ if fetchResource {
+ resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace)
+ if err != nil {
+ return err
+ }
+ }
if config.Type == constants.SecretProviderClassEnvVarPostfix {
populateAnnotationsFromSecretProviderClass(clients, &config)
}
- for _, i := range items {
- // find correct annotation and update the resource
- annotations := upgradeFuncs.AnnotationsFunc(i)
- annotationValue, found := annotations[config.Annotation]
- searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
- reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
- typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
- excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
- excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
- excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[options.SecretProviderClassExcludeReloaderAnnotation]
+ annotations := upgradeFuncs.AnnotationsFunc(resource)
+ podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource)
+ result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions())
- if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
- annotations = upgradeFuncs.PodAnnotationsFunc(i)
- annotationValue = annotations[config.Annotation]
- searchAnnotationValue = annotations[options.AutoSearchAnnotation]
- reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
- typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
- }
+ if !result.ShouldReload {
+ logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
+ return nil
+ }
- isResourceExcluded := false
+ strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload)
- switch config.Type {
- case constants.ConfigmapEnvVarPostfix:
- if foundExcludeConfigmap {
- isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
- }
- case constants.SecretEnvVarPostfix:
- if foundExcludeSecret {
- isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
- }
- case constants.SecretProviderClassEnvVarPostfix:
- if foundExcludeSecretProviderClass {
- isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretProviderClassProviderAnnotationValue)
- }
- }
+ if strategyResult.Result != constants.Updated {
+ return nil
+ }
- if isResourceExcluded {
- continue
- }
+ // Check for the pause annotation; if present, pause the Deployment before reloading
+ pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation]
- result := constants.NotUpdated
- reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
- typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
- if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
- result = strategy(upgradeFuncs, i, config, true)
- }
-
- if result != constants.Updated && annotationValue != "" {
- values := strings.Split(annotationValue, ",")
- for _, value := range values {
- value = strings.TrimSpace(value)
- re := regexp.MustCompile("^" + value + "$")
- if re.Match([]byte(config.ResourceName)) {
- result = strategy(upgradeFuncs, i, config, false)
- if result == constants.Updated {
- break
- }
- }
- }
- }
-
- if result != constants.Updated && searchAnnotationValue == "true" {
- matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
- if matchAnnotationValue == "true" {
- result = strategy(upgradeFuncs, i, config, true)
- }
- }
-
- if result == constants.Updated {
- accessor, err := meta.Accessor(i)
+ if foundPauseInterval {
+ deployment, ok := resource.(*app.Deployment)
+ if !ok {
+ logrus.Warnf("Annotation '%s' is only applicable to Deployments", options.PauseDeploymentAnnotation)
+ } else {
+ _, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval)
if err != nil {
+ logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err)
return err
}
- resourceName := accessor.GetName()
- err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
- if err != nil {
- message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
- logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
-
- collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
- collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
- if recorder != nil {
- recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
- }
- return err
- } else {
- message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
- message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
-
- logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
-
- collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
- collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
- alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
- if recorder != nil {
- recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
- }
- if ok && alert_on_reload == "true" {
- msg := fmt.Sprintf(
- "Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
- config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
- alert.SendWebhookAlert(msg)
- }
- }
}
}
+
+ if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
+ err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
+ } else {
+ err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
+ }
+
+ if err != nil {
+ message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
+ logrus.Error(message)
+
+ collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+ collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
+ if recorder != nil {
+ recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message)
+ }
+ return err
+ } else {
+ message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
+ message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
+
+ logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
+
+ collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
+ collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
+ alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
+ if recorder != nil {
+ recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message)
+ }
+ if ok && alert_on_reload == "true" {
+ msg := fmt.Sprintf(
+ "Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
+ config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
+ alert.SendWebhookAlert(msg)
+ }
+ }
+
return nil
}
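The pause value (for example `10s` in the tests later in this diff) reads like a Go duration; a plausible first step inside `PauseDeployment`, which is not shown here, is parsing that interval before pausing the rollout and scheduling a resume. A hedged sketch of just the parsing step:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumption: the annotation value is a Go duration string, as the test
	// value "10s" suggests; PauseDeployment itself is not part of this diff.
	interval, err := time.ParseDuration("10s")
	if err != nil {
		fmt.Println("invalid pause interval:", err)
		return
	}
	fmt.Println("deployment would stay paused for", interval)
}
```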
-func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
- if excludedResources == "" {
- return false
- }
-
- excludedResourcesList := strings.Split(excludedResources, ",")
- for _, excludedResource := range excludedResourcesList {
- if strings.TrimSpace(excludedResource) == resourceName {
- return true
- }
- }
-
- return false
-}
-
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
for i := range volumes {
- if mountType == constants.ConfigmapEnvVarPostfix {
+ switch mountType {
+ case constants.ConfigmapEnvVarPostfix:
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
return volumes[i].Name
}
@@ -354,7 +375,7 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
}
}
}
- } else if mountType == constants.SecretEnvVarPostfix {
+ case constants.SecretEnvVarPostfix:
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
return volumes[i].Name
}
@@ -366,7 +387,7 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
}
}
}
- } else if mountType == constants.SecretProviderClassEnvVarPostfix {
+ case constants.SecretProviderClassEnvVarPostfix:
if volumes[i].CSI != nil && volumes[i].CSI.VolumeAttributes["secretProviderClass"] == volumeName {
return volumes[i].Name
}
@@ -395,9 +416,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
for j := range envs {
envVarSource := envs[j].ValueFrom
if envVarSource != nil {
- if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
+ if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.Name == resourceName {
return &containers[i]
- } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
+ } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.Name == resourceName {
return &containers[i]
}
}
@@ -405,9 +426,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
envsFrom := containers[i].EnvFrom
for j := range envsFrom {
- if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
+ if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.Name == resourceName {
return &containers[i]
- } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
+ } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.Name == resourceName {
return &containers[i]
}
}
@@ -415,7 +436,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
return nil
}
-func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
+func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container {
volumes := upgradeFuncs.VolumesFunc(item)
containers := upgradeFuncs.ContainersFunc(item)
initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -429,7 +450,11 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
container = getContainerWithVolumeMount(initContainers, volumeMountName)
if container != nil {
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
- return &containers[0]
+ if len(containers) > 0 {
+ return &containers[0]
+ }
+ // No containers available, return nil to avoid crash
+ return nil
}
} else if container != nil {
return container
@@ -442,58 +467,80 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
if container != nil {
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
- return &containers[0]
+ if len(containers) > 0 {
+ return &containers[0]
+ }
+ // No containers available, return nil to avoid crash
+ return nil
}
}
// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
if container == nil && !autoReload {
- return &containers[0]
+ if len(containers) > 0 {
+ return &containers[0]
+ }
+ // No containers available, return nil to avoid crash
+ return nil
}
return container
}
-type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result
+type Patch struct {
+ Type patchtypes.PatchType
+ Bytes []byte
+}
-func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+type InvokeStrategyResult struct {
+ Result constants.Result
+ Patch *Patch
+}
+
+type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult
+
+func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
-
return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
-func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
- return constants.NoContainerFound
+ return InvokeStrategyResult{constants.NoContainerFound, nil}
}
// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
// Note: the data on this struct is purely informational and is not used for future updates
- reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
- annotations, err := createReloadedAnnotations(&reloadSource)
+ reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name})
+ annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
if err != nil {
logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
- return constants.NotUpdated
+ return InvokeStrategyResult{constants.NotUpdated, nil}
}
// Copy all the annotations to the item's annotations
pa := upgradeFuncs.PodAnnotationsFunc(item)
if pa == nil {
- return constants.NotUpdated
+ return InvokeStrategyResult{constants.NotUpdated, nil}
}
if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassAnnotationReloaded(pa, config) {
- return constants.NotUpdated
+ return InvokeStrategyResult{constants.NotUpdated, nil}
}
for k, v := range annotations {
pa[k] = v
}
- return constants.Updated
+ return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
+}
+
+func secretProviderClassAnnotationReloaded(oldAnnotations map[string]string, newConfig common.Config) bool {
+ annotation := oldAnnotations[getReloaderAnnotationKey()]
+ return strings.Contains(annotation, newConfig.ResourceName) && strings.Contains(annotation, newConfig.SHAValue)
}
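The dedup guard is a plain substring check against the previously written `last-reloaded-from` annotation, which is enough because the annotation embeds both the resource name and its hash. For example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The last-reloaded annotation stores a JSON blob; the guard above is a
	// substring match on resource name and hash, both embedded in the blob.
	last := `{"type":"SECRETPROVIDERCLASS","name":"my-spc","hash":"abc123"}`
	name, sha := "my-spc", "abc123"
	alreadyReloaded := strings.Contains(last, name) && strings.Contains(last, sha)
	fmt.Println("skip reload:", alreadyReloaded) // true
}
```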
func getReloaderAnnotationKey() string {
@@ -503,14 +550,9 @@ func getReloaderAnnotationKey() string {
)
}
-func secretProviderClassAnnotationReloaded(oldAnnotations map[string]string, newConfig util.Config) bool {
- annotaion := oldAnnotations[getReloaderAnnotationKey()]
- return strings.Contains(annotaion, newConfig.ResourceName) && strings.Contains(annotaion, newConfig.SHAValue)
-}
-
-func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
+func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
if target == nil {
- return nil, errors.New("target is required")
+ return nil, nil, errors.New("target is required")
}
// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
@@ -522,58 +564,72 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
lastReloadedResource, err := json.Marshal(target)
if err != nil {
- return nil, err
+ return nil, nil, err
}
annotations[lastReloadedResourceName] = string(lastReloadedResource)
- return annotations, nil
+
+ var patch []byte
+ if upgradeFuncs.SupportsPatch {
+ escapedValue, err := jsonEscape(annotations[lastReloadedResourceName])
+ if err != nil {
+ return nil, nil, err
+ }
+ patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().AnnotationTemplate, lastReloadedResourceName, escapedValue)
+ }
+
+ return annotations, patch, nil
}
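With `SupportsPatch` enabled, the annotations reload strategy emits a strategic-merge patch instead of a full object update. The exact `AnnotationTemplate` lives in the callbacks package and is not shown in this diff; the tests below assert the resulting bytes start with `{"spec":{"template":{"metadata":{"annotations":...`, so this sketch assumes a template of roughly that shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// jsonEscape mirrors the helper added at the end of this file: marshal the
// string and strip the surrounding quotes so it can be spliced into a template.
func jsonEscape(s string) (string, error) {
	b, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	escaped := string(b)
	return escaped[1 : len(escaped)-1], nil
}

func main() {
	// Assumed template shape; the real AnnotationTemplate comes from
	// upgradeFuncs.PatchTemplatesFunc() in the callbacks package.
	const annotationTemplate = `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`

	value := `{"type":"CONFIGMAP","name":"my-config","hash":"3c9a89..."}`
	escaped, err := jsonEscape(value)
	if err != nil {
		panic(err)
	}
	patch := fmt.Appendf(nil, annotationTemplate,
		"reloader.stakater.com/last-reloaded-from", escaped)
	fmt.Println(string(patch))
}
```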
func getEnvVarName(resourceName string, typeName string) string {
return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}
-func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
- var result constants.Result
+func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
- return constants.NoContainerFound
+ return InvokeStrategyResult{constants.NoContainerFound, nil}
}
if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassEnvReloaded(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue) {
- return constants.NotUpdated
+ return InvokeStrategyResult{constants.NotUpdated, nil}
}
//update if env var exists
- result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
+ updateResult := updateEnvVar(container, envVar, config.SHAValue)
// if no existing env var exists lets create one
- if result == constants.NoEnvVarFound {
+ if updateResult == constants.NoEnvVarFound {
e := v1.EnvVar{
Name: envVar,
Value: config.SHAValue,
}
container.Env = append(container.Env, e)
- result = constants.Updated
+ updateResult = constants.Updated
}
- return result
+
+ var patch []byte
+ if upgradeFuncs.SupportsPatch {
+ patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().EnvVarTemplate, container.Name, envVar, config.SHAValue)
+ }
+
+ return InvokeStrategyResult{updateResult, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}
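The env-vars strategy builds its patch the same way, from `PatchTemplatesFunc().EnvVarTemplate`. That template is also not shown in this diff; the sketch below assumes a shape that strategic merge would handle correctly (containers merge by their `name` key, so only the targeted container's env is touched):

```go
package main

import "fmt"

func main() {
	// Hypothetical EnvVarTemplate; the real one is supplied by
	// upgradeFuncs.PatchTemplatesFunc() and is an assumption here.
	const envVarTemplate = `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`

	patch := fmt.Appendf(nil, envVarTemplate,
		"app", "STAKATER_MY_CONFIG_CONFIGMAP", "3c9a892aeaedc759abc3df9884a37b8be5680382")
	fmt.Println(string(patch))
}
```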
-func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
- for i := range containers {
- envs := containers[i].Env
- for j := range envs {
- if envs[j].Name == envVar {
- if envs[j].Value != shaData {
- envs[j].Value = shaData
- return constants.Updated
- }
- return constants.NotUpdated
+func updateEnvVar(container *v1.Container, envVar string, shaData string) constants.Result {
+ envs := container.Env
+ for j := range envs {
+ if envs[j].Name == envVar {
+ if envs[j].Value != shaData {
+ envs[j].Value = shaData
+ return constants.Updated
}
+ return constants.NotUpdated
}
}
+
return constants.NoEnvVarFound
}
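`updateEnvVar` now operates on the single container selected by `getContainerUsingResource` instead of scanning every container, and its three outcomes drive the caller: `Updated` and `NotUpdated` pass through, while `NoEnvVarFound` makes the caller append the variable. A standalone sketch of those semantics (the `STAKATER_` prefix mirrors the real env-var naming; the returned strings stand in for the `constants.Result` values):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// updateEnvVarSketch mirrors the rewritten helper: it walks one container's
// env list instead of every container in the pod spec.
func updateEnvVarSketch(c *v1.Container, name, sha string) string {
	for i := range c.Env {
		if c.Env[i].Name == name {
			if c.Env[i].Value != sha {
				c.Env[i].Value = sha
				return "Updated"
			}
			return "NotUpdated"
		}
	}
	return "NoEnvVarFound" // caller appends the env var and treats it as Updated
}

func main() {
	c := &v1.Container{
		Name: "app",
		Env:  []v1.EnvVar{{Name: "STAKATER_FOO_CONFIGMAP", Value: "old-sha"}},
	}
	fmt.Println(updateEnvVarSketch(c, "STAKATER_FOO_CONFIGMAP", "new-sha")) // Updated
	fmt.Println(updateEnvVarSketch(c, "STAKATER_FOO_CONFIGMAP", "new-sha")) // NotUpdated
	fmt.Println(updateEnvVarSketch(c, "STAKATER_BAR_SECRET", "new-sha"))    // NoEnvVarFound
}
```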
@@ -589,7 +645,7 @@ func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, sh
return false
}
-func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *util.Config) {
+func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *common.Config) {
obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.TODO(), config.ResourceName, metav1.GetOptions{})
annotations := make(map[string]string)
if err != nil {
@@ -599,3 +655,12 @@ func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *ut
}
config.ResourceAnnotations = annotations
}
+
+func jsonEscape(toEscape string) (string, error) {
+ escapedBytes, err := json.Marshal(toEscape)
+ if err != nil {
+ return "", err
+ }
+ escaped := string(escapedBytes)
+ return escaped[1 : len(escaped)-1], nil
+}
diff --git a/internal/pkg/handler/upgrade_test.go b/internal/pkg/handler/upgrade_test.go
index a0fb657..5bf490f 100644
--- a/internal/pkg/handler/upgrade_test.go
+++ b/internal/pkg/handler/upgrade_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/sirupsen/logrus"
@@ -16,10 +17,15 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ patchtypes "k8s.io/apimachinery/pkg/types"
testclient "k8s.io/client-go/kubernetes/fake"
csitestclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/fake"
)
@@ -30,61 +36,72 @@ var (
CSIClient: csitestclient.NewSimpleClientset(),
}
- arsNamespace = "test-handler-" + testutil.RandSeq(5)
- arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
- arsSecretName = "testsecret-handler-" + testutil.RandSeq(5)
+ arsNamespace = "test-handler-" + testutil.RandSeq(5)
+ arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
+ arsSecretName = "testsecret-handler-" + testutil.RandSeq(5)
+ arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
+ arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
+ arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
+ arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
+ arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
+ arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
+ arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
+ arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
+ arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
+ arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
+ arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5)
+ arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
+ arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
+ arsSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
+ arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)
+
+ // Secret provider class
arsSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5)
- arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
- arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
- arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
- arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
arsSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5)
- arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
- arsProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
- arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
- arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
- arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
- arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
- arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
- arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
- arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
- arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
- arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
- arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5)
- arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5)
- arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
arsSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5)
- arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
- arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
arsSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
arsSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5)
arsSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5)
- ersNamespace = "test-handler-" + testutil.RandSeq(5)
- ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
- ersSecretName = "testsecret-handler-" + testutil.RandSeq(5)
- ersSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5)
- ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
- ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
- ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
- ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
- ersSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5)
- ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
- ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
- ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
- ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
- ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
- ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
- ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
- ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
- ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
- ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
- ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
- ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5)
- ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
+ ersNamespace = "test-handler-" + testutil.RandSeq(5)
+ ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
+ ersSecretName = "testsecret-handler-" + testutil.RandSeq(5)
+ ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
+ ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
+ ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
+ ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
+ ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
+ ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5)
+ ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
+ ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
+ ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
+ ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
+ ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
+ ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
+ ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
+ ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)
+
+ // SecretProviderClass
+ ersSecretProviderClassName = "testsecretproviderclass-handler-" + testutil.RandSeq(5)
+ ersSecretProviderClassWithInitContainer = "testsecretproviderclassWithInitContainer-handler-" + testutil.RandSeq(5)
+
ersSecretProviderClassWithSPCAutoAnnotation = "testsecretproviderclasswithspcautoannotationdeployment-handler-" + testutil.RandSeq(5)
- ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
- ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
ersSecretProviderClassWithExcludeSPCAnnotation = "testsecretproviderclasswithspcexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
ersSecretProviderClassReloadedWithSameConfig = "testsecretproviderclassreloadedwithsameconfig-handler-" + testutil.RandSeq(5)
ersSecretProviderClassReloadedWithDifferentConfig = "testsecretproviderclassreloadedwithdifferentconfig-handler-" + testutil.RandSeq(5)
@@ -234,6 +251,12 @@ func setupArs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
+ // Creating configmap for testing pausing deployments
+ _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com")
+ if err != nil {
+ logrus.Errorf("Error in configmap creation: %v", err)
+ }
+
// Creating secret used with secret auto annotation
_, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data)
if err != nil {
@@ -264,6 +287,35 @@ func setupArs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
+ // Creating configmap with ignore annotation
+ _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com")
+ if err != nil {
+ logrus.Errorf("Error in configmap creation: %v", err)
+ }
+ // Patch with ignore annotation
+ cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace)
+ patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
+ _, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
+
+ // Creating secret with ignore annotation
+ _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data)
+ if err != nil {
+ logrus.Errorf("Error in secret creation: %v", err)
+ }
+ secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace)
+ _, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
+
+ // Creating Deployment referencing configmap with ignore annotation
+ _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
+ }
+ // Creating Deployment referencing secret with ignore annotation
+ _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
+ }
+
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true)
if err != nil {
@@ -396,7 +448,7 @@ func setupArs() {
}
// Creating Deployment with secret and exclude secret annotation
- _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.ConfigmapResourceType)
+ _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType)
if err != nil {
logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err)
}
@@ -521,6 +573,12 @@ func setupArs() {
if err != nil {
logrus.Errorf("Error in Deployment with both annotations: %v", err)
}
+
+ // Creating Deployment with pause annotation
+ _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with configmap creation: %v", err)
+ }
}
func teardownArs() {
@@ -770,6 +828,12 @@ func teardownArs() {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
+ // Deleting Deployment with pause annotation
+ deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
+ if deploymentError != nil {
+ logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+ }
+
// Deleting Configmap
err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName)
if err != nil {
@@ -806,7 +870,7 @@ func teardownArs() {
logrus.Errorf("Error while deleting the configmap %v", err)
}
- // Deleting Configmap used projected volume in init containers
+ // Deleting secret used in projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
@@ -919,6 +983,12 @@ func teardownArs() {
logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err)
}
+ // Deleting configmap for testing pausing deployments
+ err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
+ if err != nil {
+ logrus.Errorf("Error while deleting the configmap: %v", err)
+ }
+
// Deleting namespace
testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient)
@@ -984,6 +1054,12 @@ func setupErs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
+ // Creating configmap for testing pausing deployments
+ _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com")
+ if err != nil {
+ logrus.Errorf("Error in configmap creation: %v", err)
+ }
+
// Creating secret
_, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data)
if err != nil {
@@ -1071,6 +1147,34 @@ func setupErs() {
logrus.Errorf("Error in secretproviderclass creation: %v", err)
}
+ // Creating configmap with ignore annotation
+ _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com")
+ if err != nil {
+ logrus.Errorf("Error in configmap creation: %v", err)
+ }
+ cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace)
+ patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
+ _, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
+
+ // Creating secret with ignore annotation
+ _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data)
+ if err != nil {
+ logrus.Errorf("Error in secret creation: %v", err)
+ }
+ secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace)
+ _, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
+
+ // Creating Deployment referencing configmap with ignore annotation
+ _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
+ }
+ // Creating Deployment referencing secret with ignore annotation
+ _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
+ }
+
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
if err != nil {
@@ -1214,6 +1318,12 @@ func setupErs() {
logrus.Errorf("Error in Deployment with secretproviderclass and with secretproviderclass exclude annotation: %v", err)
}
+ // Creating Deployment with pause annotation
+ _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
+ if err != nil {
+ logrus.Errorf("Error in Deployment with configmap creation: %v", err)
+ }
+
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
if err != nil {
@@ -1570,6 +1680,12 @@ func teardownErs() {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
+ // Deleting Deployment for testing pausing deployments
+ deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
+ if deploymentError != nil {
+ logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+ }
+
// Deleting Configmap
err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName)
if err != nil {
@@ -1606,7 +1722,7 @@ func teardownErs() {
logrus.Errorf("Error while deleting the configmap %v", err)
}
- // Deleting Configmap used projected volume in init containers
+ // Deleting secret used in projected volume in init containers
err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer)
if err != nil {
logrus.Errorf("Error while deleting the secret %v", err)
@@ -1718,19 +1834,24 @@ func teardownErs() {
if err != nil {
logrus.Errorf("Error while deleting the secretproviderclass used with secretproviderclass to reload with different config: %v", err)
}
+ // Deleting ConfigMap for testing pausing deployments
+ err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
+ if err != nil {
+ logrus.Errorf("Error while deleting the configmap: %v", err)
+ }
// Deleting namespace
testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient)
}
-func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) util.Config {
+func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config {
ns := ersNamespace
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
ns = arsNamespace
}
- return util.Config{
+ return common.Config{
Namespace: ns,
ResourceName: name,
SHAValue: shaData,
@@ -1747,7 +1868,7 @@ func getCollectors() metrics.Collectors {
var labelSucceeded = prometheus.Labels{"success": "true"}
var labelFailed = prometheus.Labels{"success": "false"}
-func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -1765,6 +1886,22 @@ func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Client
}
}
+func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+ // Install the stubs before invoking the action so they can actually be called
+ upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ return nil
+ }
+ upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+ err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
+ }
+}
+
func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1774,6 +1911,18 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
+ itemCalled := 0
+ itemsCalled := 0
+
+ deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetDeploymentItem(client, namespace, name)
+ }
+ deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetDeploymentItems(client, namespace)
+ }
+
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -1793,9 +1942,68 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 {
t.Errorf("Counter by namespace was not increased")
}
+
+ assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
+ assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
+
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) {
+ options.ReloadStrategy = constants.AnnotationsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com")
+ config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+
+ assert.True(t, deploymentFuncs.SupportsPatch)
+ assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate)
+
+ itemCalled := 0
+ itemsCalled := 0
+
+ deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetDeploymentItem(client, namespace, name)
+ }
+ deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetDeploymentItems(client, namespace)
+ }
+
+ patchCalled := 0
+ deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
+ assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`)
+ return nil
+ }
+
+ deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+ err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for Deployment with Configmap")
+ }
+
+ assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
+ assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
+ assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
+
+ deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1968,7 +2176,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
- _ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
+ _ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
@@ -2462,7 +2670,7 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testi
logrus.Infof("Verifying deployment did not update")
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
if updated {
- t.Errorf("Deployment which had to be exluded was updated")
+ t.Errorf("Deployment which had to be excluded was updated")
}
}
@@ -2644,6 +2852,7 @@ func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *te
t.Errorf("Deployment which had to be excluded was updated")
}
}
+
func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2685,6 +2894,18 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
+ itemCalled := 0
+ itemsCalled := 0
+
+ daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetDaemonSetItem(client, namespace, name)
+ }
+ daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetDaemonSetItems(client, namespace)
+ }
+
err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -2705,9 +2926,68 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
+ assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
+ assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
+
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) {
+ options.ReloadStrategy = constants.AnnotationsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com")
+ config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+
+ itemCalled := 0
+ itemsCalled := 0
+
+ daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetDaemonSetItem(client, namespace, name)
+ }
+ daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetDaemonSetItems(client, namespace)
+ }
+
+ assert.True(t, daemonSetFuncs.SupportsPatch)
+ assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate)
+
+ patchCalled := 0
+ daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
+ assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`)
+ return nil
+ }
+
+ daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
+ }
+
+ assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
+ assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
+ assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
+
+ daemonSetFuncs = GetDaemonSetRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2877,6 +3157,18 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
+ itemCalled := 0
+ itemsCalled := 0
+
+ statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetStatefulSetItem(client, namespace, name)
+ }
+ statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetStatefulSetItems(client, namespace)
+ }
+
err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -2897,9 +3189,68 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
+ assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
+ assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
+
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) {
+ options.ReloadStrategy = constants.AnnotationsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com")
+ config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
+
+ itemCalled := 0
+ itemsCalled := 0
+
+ statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
+ itemCalled++
+ return callbacks.GetStatefulSetItem(client, namespace, name)
+ }
+ statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
+ itemsCalled++
+ return callbacks.GetStatefulSetItems(client, namespace)
+ }
+
+ assert.True(t, statefulSetFuncs.SupportsPatch)
+ assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate)
+
+ patchCalled := 0
+ statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
+ assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`)
+ return nil
+ }
+
+ statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
+ }
+
+ assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
+ assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
+ assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
+
+ statefulSetFuncs = GetStatefulSetRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3094,6 +3445,9 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
+ deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
+ return fmt.Errorf("error")
+ }
collectors := getCollectors()
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
@@ -3107,7 +3461,66 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
}
}
-func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) {
+ options.ReloadStrategy = constants.AnnotationsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com")
+ config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS")
+ }
+
+ // Ensure deployment is NOT updated
+ updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
+ if updated {
+ t.Errorf("Deployment was updated but should not have been")
+ }
+
+ // Ensure counters remain zero
+ if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
+ t.Errorf("Reload counter should not have increased")
+ }
+ if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 {
+ t.Errorf("Reload counter by namespace should not have increased")
+ }
+}
+
+func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) {
+ options.ReloadStrategy = constants.EnvVarsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com")
+ config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS")
+ }
+
+ // Ensure deployment is NOT updated
+ updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs)
+ if updated {
+ t.Errorf("Deployment was updated but should not have been (ERS)")
+ }
+
+ // Ensure counters remain zero
+ if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
+ t.Errorf("Reload counter should not have increased (ERS)")
+ }
+ if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 {
+ t.Errorf("Reload counter by namespace should not have increased (ERS)")
+ }
+}
+
+func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -3124,6 +3537,24 @@ func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Client
}
}
+func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+ assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate)
+
+ upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ assert.Equal(t, patchtypes.JSONPatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ return nil
+ }
+ upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
+ }
+}
+
func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3156,6 +3587,48 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) {
+ options.ReloadStrategy = constants.EnvVarsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com")
+ config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+
+ assert.True(t, deploymentFuncs.SupportsPatch)
+ assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate)
+
+ patchCalled := 0
+ deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
+ assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`)
+ return nil
+ }
+
+ deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix)
+ }
+
+ assert.Equal(t, 2, patchCalled)
+
+ deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3264,7 +3737,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
- _ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
+ _ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
@@ -4009,6 +4482,49 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) {
+ options.ReloadStrategy = constants.EnvVarsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com")
+ config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+
+ assert.True(t, daemonSetFuncs.SupportsPatch)
+ assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate)
+
+ patchCalled := 0
+ daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
+ assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`)
+ return nil
+ }
+
+ daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
+ time.Sleep(5 * time.Second)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
+ }
+
+ assert.Equal(t, 2, patchCalled)
+
+ daemonSetFuncs = GetDaemonSetRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -4201,6 +4717,49 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
+func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) {
+ options.ReloadStrategy = constants.EnvVarsReloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com")
+ config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
+
+ assert.True(t, statefulSetFuncs.SupportsPatch)
+ assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate)
+
+ patchCalled := 0
+ statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
+ patchCalled++
+ if patchCalled < 2 {
+ return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
+ }
+ assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
+ assert.NotEmpty(t, bytes)
+ assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
+ assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`)
+ return nil
+ }
+
+ statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
+ t.Errorf("Update should not be called")
+ return nil
+ }
+
+ collectors := getCollectors()
+
+ err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
+ time.Sleep(5 * time.Second)
+ if err != nil {
+ t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
+ }
+
+ assert.Equal(t, 2, patchCalled)
+
+ statefulSetFuncs = GetStatefulSetRollingUpgradeFuncs()
+ testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
+}
+
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -4397,6 +4956,9 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
+ deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
+ return fmt.Errorf("error")
+ }
collectors := getCollectors()
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
@@ -4409,3 +4971,183 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
}
+
+func TestPausingDeploymentUsingErs(t *testing.T) {
+ options.ReloadStrategy = constants.EnvVarsReloadStrategy
+ testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace)
+}
+
+func TestPausingDeploymentUsingArs(t *testing.T) {
+ options.ReloadStrategy = constants.AnnotationsReloadStrategy
+ testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace)
+}
+
+func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) {
+ options.ReloadStrategy = reloadStrategy
+ envVarPostfix := constants.ConfigmapEnvVarPostfix
+
+ shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com")
+ config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+ deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+ collectors := getCollectors()
+
+ _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+
+ // Wait for deployment to have paused-at annotation
+ logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName)
+ err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second)
+ if err != nil {
+ t.Errorf("Failed to wait for deployment paused-at annotation: %v", err)
+ }
+
+ if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+ t.Errorf("Counter was not increased")
+ }
+
+ if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 {
+ t.Errorf("Counter by namespace was not increased")
+ }
+
+ logrus.Infof("Verifying deployment has been paused")
+ items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
+ deploymentPaused, err := isDeploymentPaused(items, testName)
+ if err != nil {
+ t.Errorf("%s", err.Error())
+ }
+ if !deploymentPaused {
+ t.Errorf("Deployment has not been paused")
+ }
+
+ shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com")
+ config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
+
+ _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
+
+ if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 {
+ t.Errorf("Counter was not increased")
+ }
+
+ if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 {
+ t.Errorf("Counter by namespace was not increased")
+ }
+
+ logrus.Infof("Verifying deployment is still paused")
+ items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
+ deploymentPaused, err = isDeploymentPaused(items, testName)
+ if err != nil {
+ t.Errorf("%s", err.Error())
+ }
+ if !deploymentPaused {
+ t.Errorf("Deployment should still be paused")
+ }
+
+ logrus.Infof("Verifying deployment has been resumed after pause interval")
+ time.Sleep(11 * time.Second)
+ items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
+ deploymentPaused, err = isDeploymentPaused(items, testName)
+ if err != nil {
+ t.Errorf("%s", err.Error())
+ }
+ if deploymentPaused {
+ t.Errorf("Deployment should have been resumed after pause interval")
+ }
+}
+
+func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) {
+ deployment, err := FindDeploymentByName(deployments, deploymentName)
+ if err != nil {
+ return false, err
+ }
+ return IsPaused(deployment), nil
+}
+
+// waitForDeploymentPausedAtAnnotation waits for a deployment to carry the paused-at annotation set by Reloader
+func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error {
+ start := time.Now()
+
+ for time.Since(start) < timeout {
+ items := deploymentFuncs.ItemsFunc(clients, namespace)
+ deployment, err := FindDeploymentByName(items, deploymentName)
+ if err == nil {
+ annotations := deployment.GetAnnotations()
+ if annotations != nil {
+ if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists {
+ return nil
+ }
+ }
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName)
+}
+
+// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers
+// This simulates the scenario where Argo Rollouts with workloadRef return empty containers
+func MockArgoRolloutWithEmptyContainers(namespace, name string) runtime.Object {
+ rollout := &argorolloutv1alpha1.Rollout{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: argorolloutv1alpha1.RolloutSpec{
+ Template: v1.PodTemplateSpec{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{}, // Empty containers slice
+ InitContainers: []v1.Container{}, // Empty init containers slice
+ Volumes: []v1.Volume{}, // Empty volumes slice
+ },
+ },
+ },
+ }
+ return rollout
+}
+
+// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions
+func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) {
+ namespace := "test-namespace"
+ resourceName := "test-configmap"
+
+ // Use real Argo Rollout functions but mock the containers function
+ rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs()
+ originalContainersFunc := rolloutFuncs.ContainersFunc
+ originalInitContainersFunc := rolloutFuncs.InitContainersFunc
+
+ // Override to return empty containers (simulating workloadRef scenario)
+ rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container {
+ return []v1.Container{} // Empty like workloadRef rollouts
+ }
+ rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container {
+ return []v1.Container{} // Empty like workloadRef rollouts
+ }
+
+ // Restore original functions after test
+ defer func() {
+ rolloutFuncs.ContainersFunc = originalContainersFunc
+ rolloutFuncs.InitContainersFunc = originalInitContainersFunc
+ }()
+
+ // Use proper Argo Rollout object instead of Pod
+ mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout")
+
+ config := common.Config{
+ Namespace: namespace,
+ ResourceName: resourceName,
+ Type: constants.ConfigmapEnvVarPostfix,
+ SHAValue: "test-sha",
+ }
+
+ // Test both autoReload scenarios using subtests
+ for _, autoReload := range []bool{true, false} {
+ t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) {
+ // This tests the actual fix in the context of Argo Rollouts
+ result := getContainerUsingResource(rolloutFuncs, mockRollout, config, autoReload)
+
+ if result != nil {
+ t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result)
+ }
+ })
+ }
+}
diff --git a/internal/pkg/leadership/leadership_test.go b/internal/pkg/leadership/leadership_test.go
index 1c916e5..eed0705 100644
--- a/internal/pkg/leadership/leadership_test.go
+++ b/internal/pkg/leadership/leadership_test.go
@@ -16,7 +16,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
- "github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
)
@@ -159,7 +159,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
// Verifying deployment update
logrus.Infof("Verifying pod envvars has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
- config := util.Config{
+ config := common.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
@@ -186,7 +186,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
// Verifying that the deployment was not updated as leadership has been lost
logrus.Infof("Verifying pod envvars has not been updated")
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
- config = util.Config{
+ config = common.Config{
Namespace: testutil.Namespace,
ResourceName: configmapName,
SHAValue: shaData,
diff --git a/internal/pkg/options/flags.go b/internal/pkg/options/flags.go
index dcefade..62f2853 100644
--- a/internal/pkg/options/flags.go
+++ b/internal/pkg/options/flags.go
@@ -25,6 +25,8 @@ var (
SecretProviderClassUpdateOnChangeAnnotation = "secretproviderclass.reloader.stakater.com/reload"
// ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps
ReloaderAutoAnnotation = "reloader.stakater.com/auto"
+ // IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps
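+ // For example, a configmap annotated with reloader.stakater.com/ignore: "true" will not trigger reloads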
+ IgnoreResourceAnnotation = "reloader.stakater.com/ignore"
// ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps
ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto"
// SecretReloaderAutoAnnotation is an annotation to detect changes in secrets
@@ -45,6 +47,12 @@ var (
SearchMatchAnnotation = "reloader.stakater.com/match"
// RolloutStrategyAnnotation is an annotation to define rollout update strategy
RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
+ // PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after
+ // a configmap/secret change has been detected. Valid values are positive Go durations as described
+ // at https://pkg.go.dev/time#ParseDuration.
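+ // Example (hypothetical manifest value): deployment.reloader.stakater.com/pause-period: "5m"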
+ PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period"
+ // PauseDeploymentTimeAnnotation is an annotation set by Reloader to record when a deployment was paused
+ PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -64,6 +72,21 @@ var (
WebhookUrl = ""
// EnableCSIIntegration Adds support to watch SecretProviderClassPodStatus and restart deployment based on it
EnableCSIIntegration = false
+ // ResourcesToIgnore is a list of resources to ignore when watching for changes
+ ResourcesToIgnore = []string{}
+ // WorkloadTypesToIgnore is a list of workload types to ignore when watching for changes
+ WorkloadTypesToIgnore = []string{}
+ // NamespacesToIgnore is a list of namespace names to ignore when watching for changes
+ NamespacesToIgnore = []string{}
+ // NamespaceSelectors is a list of namespace selectors to watch for changes
+ NamespaceSelectors = []string{}
+ // ResourceSelectors is a list of resource selectors to watch for changes
+ ResourceSelectors = []string{}
+ // EnablePProf enables pprof for profiling
+ EnablePProf = false
+ // PProfAddr is the address to start pprof server on
+ // Default is :6060
+ PProfAddr = ":6060"
)
func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {
diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go
index a61b63d..4901d9a 100644
--- a/internal/pkg/testutil/kube.go
+++ b/internal/pkg/testutil/kube.go
@@ -21,6 +21,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
+ "github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -101,7 +102,7 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
annotations[options.SecretProviderClassReloaderAutoAnnotation] = "true"
}
- if !(len(annotations) > 0) {
+ if len(annotations) == 0 {
annotations = map[string]string{
options.ConfigmapUpdateOnChangeAnnotation: name,
options.SecretUpdateOnChangeAnnotation: name,
@@ -501,20 +502,21 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
},
}
if !both {
- deployment.ObjectMeta.Annotations = nil
+ deployment.Annotations = nil
}
- deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, false, map[string]string{})
+ deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, false, map[string]string{})
return deployment
}
func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
replicaset := int32(1)
var objectMeta metav1.ObjectMeta
- if resourceType == SecretResourceType {
+ switch resourceType {
+ case SecretResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, false, map[string]string{})
- } else if resourceType == ConfigmapResourceType {
+ case ConfigmapResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, false, map[string]string{})
- } else if resourceType == SecretProviderClassPodStatusResourceType {
+ case SecretProviderClassPodStatusResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, false, false, true, map[string]string{})
}
@@ -538,11 +540,12 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string,
annotation := map[string]string{}
- if resourceType == SecretResourceType {
+ switch resourceType {
+ case SecretResourceType:
annotation[options.SecretExcludeReloaderAnnotation] = deploymentName
- } else if resourceType == ConfigmapResourceType {
+ case ConfigmapResourceType:
annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName
- } else if resourceType == SecretProviderClassPodStatusResourceType {
+ case SecretProviderClassPodStatusResourceType:
annotation[options.SecretProviderClassExcludeReloaderAnnotation] = deploymentName
}
@@ -796,7 +799,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
return ""
}
- var last util.ReloadSource
+ var last common.ReloadSource
bytes := []byte(annotationJson)
err := json.Unmarshal(bytes, &last)
if err != nil {
@@ -809,17 +812,18 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
// ConvertResourceToSHA generates SHA from secret, configmap or secretproviderclasspodstatus data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
- if resourceType == SecretResourceType {
+ switch resourceType {
+ case SecretResourceType:
secret := GetSecret(namespace, resourceName, data)
for k, v := range secret.Data {
values = append(values, k+"="+string(v[:]))
}
- } else if resourceType == ConfigmapResourceType {
+ case ConfigmapResourceType:
configmap := GetConfigmap(namespace, resourceName, data)
for k, v := range configmap.Data {
values = append(values, k+"="+v)
}
- } else if resourceType == SecretProviderClassPodStatusResourceType {
+ case SecretProviderClassPodStatusResourceType:
secretproviderclasspodstatus := GetSecretProviderClassPodStatus(namespace, resourceName, data)
for _, v := range secretproviderclasspodstatus.Status.Objects {
values = append(values, v.ID+"="+v.Version)
@@ -882,6 +886,26 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
return deployment, err
}
+// CreateDeploymentWithAnnotations creates a deployment with the given additional annotations in the given namespace and returns the Deployment
+func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) {
+ logrus.Infof("Creating Deployment")
+ deploymentClient := client.AppsV1().Deployments(namespace)
+ var deploymentObj *appsv1.Deployment
+ if volumeMount {
+ deploymentObj = GetDeployment(namespace, deploymentName)
+ } else {
+ deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
+ }
+
+ if deploymentObj.Annotations == nil {
+ deploymentObj.Annotations = map[string]string{}
+ }
+ for annotationKey, annotationValue := range additionalAnnotations {
+ deploymentObj.Annotations[annotationKey] = annotationValue
+ }
+
+ deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
+ time.Sleep(3 * time.Second)
+ return deployment, err
+}
+
// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
logrus.Infof("Creating DeploymentConfig")
@@ -1056,6 +1080,22 @@ func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulse
return statefulsetError
}
+// DeleteCronJob deletes a cronJob in given namespace and returns the error if any
+func DeleteCronJob(client kubernetes.Interface, namespace string, cronJobName string) error {
+ logrus.Infof("Deleting CronJob %s", cronJobName)
+ cronJobError := client.BatchV1().CronJobs(namespace).Delete(context.TODO(), cronJobName, metav1.DeleteOptions{})
+ time.Sleep(3 * time.Second)
+ return cronJobError
+}
+
+// DeleteJob deletes a job in given namespace and returns the error if any
+func DeleteJob(client kubernetes.Interface, namespace string, jobName string) error {
+ logrus.Infof("Deleting Job %s", jobName)
+ jobError := client.BatchV1().Jobs(namespace).Delete(context.TODO(), jobName, metav1.DeleteOptions{})
+ time.Sleep(3 * time.Second)
+ return jobError
+}
+
// UpdateConfigMap updates a configmap in given namespace and returns the error if any
func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error {
logrus.Infof("Updating configmap %q.\n", configmapName)
@@ -1147,7 +1187,7 @@ func RandSeq(n int) string {
}
// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
@@ -1193,7 +1233,7 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
}
// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all Envvars SKAKATER_name_CONFIGMAP/SECRET are removed
-func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
@@ -1242,7 +1282,7 @@ func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVa
}
// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
diff --git a/internal/pkg/util/util.go b/internal/pkg/util/util.go
index f23094b..53846f3 100644
--- a/internal/pkg/util/util.go
+++ b/internal/pkg/util/util.go
@@ -3,10 +3,15 @@ package util
import (
"bytes"
"encoding/base64"
+ "errors"
+ "fmt"
"sort"
"strings"
+ "github.com/spf13/cobra"
+ "github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
+ "github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
)
@@ -65,8 +70,6 @@ func GetSHAfromSecretProviderClassPodStatus(data csiv1.SecretProviderClassPodSta
type List []string
-type Map map[string]string
-
func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {
@@ -75,3 +78,62 @@ func (l *List) Contains(s string) bool {
}
return false
}
+
+func ConfigureReloaderFlags(cmd *cobra.Command) {
+ cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
+ cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
+ cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
+ cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
+ cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
+ cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
+ cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
+ cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
+ cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected")
+ cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to indicate when a deployment was paused by Reloader")
+ cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
+ cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
+ cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
+ cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
+ cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)")
+ cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
+ cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
+ cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
+ cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
+ cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
+ cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
+ cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
+ cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
+ cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
+ cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling")
+ cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
+}
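+
+// A minimal wiring sketch (assumes the caller owns the cobra root command):
+//
+//	rootCmd := &cobra.Command{Use: "reloader"}
+//	ConfigureReloaderFlags(rootCmd)
+//	_ = rootCmd.Execute()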
+
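+// GetIgnoredResourcesList validates the --resources-to-ignore flag and returns it
+// as a List; only 'configMaps' or 'secrets' is accepted, not both at once.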
+func GetIgnoredResourcesList() (List, error) {
+
+ ignoredResourcesList := options.ResourcesToIgnore
+
+ for _, v := range ignoredResourcesList {
+ if v != "configMaps" && v != "secrets" {
+ return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
+ }
+ }
+
+ if len(ignoredResourcesList) > 1 {
+ return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
+ }
+
+ return ignoredResourcesList, nil
+}
+
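+// GetIgnoredWorkloadTypesList validates the --ignored-workload-types flag and
+// returns it as a List; valid values are 'jobs', 'cronjobs', or both.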
+func GetIgnoredWorkloadTypesList() (List, error) {
+
+ ignoredWorkloadTypesList := options.WorkloadTypesToIgnore
+
+ for _, v := range ignoredWorkloadTypesList {
+ if v != "jobs" && v != "cronjobs" {
+ return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v)
+ }
+ }
+
+ return ignoredWorkloadTypesList, nil
+}
diff --git a/internal/pkg/util/util_test.go b/internal/pkg/util/util_test.go
index 88c6bad..338f329 100644
--- a/internal/pkg/util/util_test.go
+++ b/internal/pkg/util/util_test.go
@@ -3,6 +3,7 @@ package util
import (
"testing"
+ "github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
)
@@ -45,3 +46,141 @@ func TestGetHashFromConfigMap(t *testing.T) {
}
}
}
+
+func TestGetIgnoredWorkloadTypesList(t *testing.T) {
+ // Save original state
+ originalWorkloadTypes := options.WorkloadTypesToIgnore
+ defer func() {
+ options.WorkloadTypesToIgnore = originalWorkloadTypes
+ }()
+
+ tests := []struct {
+ name string
+ workloadTypes []string
+ expectError bool
+ expected []string
+ }{
+ {
+ name: "Both jobs and cronjobs",
+ workloadTypes: []string{"jobs", "cronjobs"},
+ expectError: false,
+ expected: []string{"jobs", "cronjobs"},
+ },
+ {
+ name: "Only jobs",
+ workloadTypes: []string{"jobs"},
+ expectError: false,
+ expected: []string{"jobs"},
+ },
+ {
+ name: "Only cronjobs",
+ workloadTypes: []string{"cronjobs"},
+ expectError: false,
+ expected: []string{"cronjobs"},
+ },
+ {
+ name: "Empty list",
+ workloadTypes: []string{},
+ expectError: false,
+ expected: []string{},
+ },
+ {
+ name: "Invalid workload type",
+ workloadTypes: []string{"invalid"},
+ expectError: true,
+ expected: nil,
+ },
+ {
+ name: "Mixed valid and invalid",
+ workloadTypes: []string{"jobs", "invalid"},
+ expectError: true,
+ expected: nil,
+ },
+ {
+ name: "Duplicate values",
+ workloadTypes: []string{"jobs", "jobs"},
+ expectError: false,
+ expected: []string{"jobs", "jobs"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Set the global option
+ options.WorkloadTypesToIgnore = tt.workloadTypes
+
+ result, err := GetIgnoredWorkloadTypesList()
+
+ if tt.expectError && err == nil {
+ t.Errorf("Expected error but got none")
+ }
+
+ if !tt.expectError && err != nil {
+ t.Errorf("Expected no error but got: %v", err)
+ }
+
+ if !tt.expectError {
+ if len(result) != len(tt.expected) {
+ t.Errorf("Expected %v, got %v", tt.expected, result)
+ return
+ }
+
+ for i, expected := range tt.expected {
+ if i >= len(result) || result[i] != expected {
+ t.Errorf("Expected %v, got %v", tt.expected, result)
+ break
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestListContains(t *testing.T) {
+ tests := []struct {
+ name string
+ list List
+ item string
+ expected bool
+ }{
+ {
+ name: "List contains item",
+ list: List{"jobs", "cronjobs"},
+ item: "jobs",
+ expected: true,
+ },
+ {
+ name: "List does not contain item",
+ list: List{"jobs"},
+ item: "cronjobs",
+ expected: false,
+ },
+ {
+ name: "Empty list",
+ list: List{},
+ item: "jobs",
+ expected: false,
+ },
+ {
+ name: "Case sensitive matching",
+ list: List{"jobs", "cronjobs"},
+ item: "Jobs",
+ expected: false,
+ },
+ {
+ name: "Multiple occurrences",
+ list: List{"jobs", "jobs", "cronjobs"},
+ item: "jobs",
+ expected: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := tt.list.Contains(tt.item)
+ if result != tt.expected {
+ t.Errorf("Expected %v, got %v", tt.expected, result)
+ }
+ })
+ }
+}
diff --git a/pkg/common/common.go b/pkg/common/common.go
new file mode 100644
index 0000000..b6fe3b5
--- /dev/null
+++ b/pkg/common/common.go
@@ -0,0 +1,376 @@
+package common
+
+import (
+ "context"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/stakater/Reloader/internal/pkg/constants"
+ "github.com/stakater/Reloader/internal/pkg/options"
+ "github.com/stakater/Reloader/internal/pkg/util"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/kubernetes"
+)
+
+type Map map[string]string
+
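+// ReloadCheckResult reports whether a workload should be reloaded; AutoReload is
+// true when the decision came from an auto-reload annotation (or auto-reload-all)
+// rather than a match on an explicitly named resource.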
+type ReloadCheckResult struct {
+ ShouldReload bool
+ AutoReload bool
+}
+
+// ReloaderOptions contains all configurable options for the Reloader controller.
+// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets.
+type ReloaderOptions struct {
+ // AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated
+ AutoReloadAll bool `json:"autoReloadAll"`
+ // ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name
+ ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"`
+ // SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name
+ SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"`
+ // SecretProviderClassUpdateOnChangeAnnotation is the annotation key used to detect changes in SecretProviderClasses specified by name
+ SecretProviderClassUpdateOnChangeAnnotation string `json:"secretProviderClassUpdateOnChangeAnnotation"`
+ // ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets
+ ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"`
+ // IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched
+ IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"`
+ // ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only
+ ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"`
+ // SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only
+ SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"`
+ // SecretProviderClassReloaderAutoAnnotation is the annotation key used to detect changes in SecretProviderClasses only
+ SecretProviderClassReloaderAutoAnnotation string `json:"secretProviderClassReloaderAutoAnnotation"`
+ // ConfigmapExcludeReloaderAnnotation is the annotation key containing comma-separated list of ConfigMaps to exclude from watching
+ ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"`
+ // SecretExcludeReloaderAnnotation is the annotation key containing comma-separated list of Secrets to exclude from watching
+ SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"`
+ // SecretProviderClassExcludeReloaderAnnotation is the annotation key containing comma-separated list of SecretProviderClasses to exclude from watching
+ SecretProviderClassExcludeReloaderAnnotation string `json:"secretProviderClassExcludeReloaderAnnotation"`
+ // AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with SearchMatchAnnotation
+ AutoSearchAnnotation string `json:"autoSearchAnnotation"`
+ // SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation
+ SearchMatchAnnotation string `json:"searchMatchAnnotation"`
+ // RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads
+ RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"`
+ // PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after
+ PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"`
+ // PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader
+ PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"`
+
+ // LogFormat specifies the log format to use (json, or empty string for default text format)
+ LogFormat string `json:"logFormat"`
+ // LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic)
+ LogLevel string `json:"logLevel"`
+ // IsArgoRollouts indicates whether support for Argo Rollouts is enabled
+ IsArgoRollouts bool `json:"isArgoRollouts"`
+ // ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations)
+ ReloadStrategy string `json:"reloadStrategy"`
+ // ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created
+ ReloadOnCreate bool `json:"reloadOnCreate"`
+ // ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted
+ ReloadOnDelete bool `json:"reloadOnDelete"`
+ // SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true)
+ SyncAfterRestart bool `json:"syncAfterRestart"`
+ // EnableHA indicates whether High Availability mode is enabled with leader election
+ EnableHA bool `json:"enableHA"`
+ // EnableCSIIntegration indicates whether CSI integration is enabled to watch SecretProviderClassPodStatus
+ EnableCSIIntegration bool `json:"enableCSIIntegration"`
+ // WebhookUrl is the URL to send webhook notifications to instead of performing reloads
+ WebhookUrl string `json:"webhookUrl"`
+ // ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
+ ResourcesToIgnore []string `json:"resourcesToIgnore"`
+ // WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs")
+ WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"`
+ // NamespaceSelectors is a list of label selectors to filter namespaces to watch
+ NamespaceSelectors []string `json:"namespaceSelectors"`
+ // ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
+ ResourceSelectors []string `json:"resourceSelectors"`
+ // NamespacesToIgnore is a list of namespace names to ignore when watching for changes
+ NamespacesToIgnore []string `json:"namespacesToIgnore"`
+ // EnablePProf enables pprof for profiling
+ EnablePProf bool `json:"enablePProf"`
+ // PProfAddr is the address to start pprof server on
+ PProfAddr string `json:"pprofAddr"`
+}
+
+var CommandLineOptions *ReloaderOptions
+
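+// PublishMetaInfoConfigmap creates or updates a ConfigMap in the namespace named
+// by RELOADER_NAMESPACE, recording build info, the effective ReloaderOptions and
+// the deployment's metadata for introspection.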
+func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
+ namespace := os.Getenv("RELOADER_NAMESPACE")
+ if namespace == "" {
+ logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation")
+ return
+ }
+
+ metaInfo := &MetaInfo{
+ BuildInfo: *NewBuildInfo(),
+ ReloaderOptions: *GetCommandLineOptions(),
+ DeploymentInfo: metav1.ObjectMeta{
+ Name: os.Getenv("RELOADER_DEPLOYMENT_NAME"),
+ Namespace: namespace,
+ },
+ }
+
+ configMap := metaInfo.ToConfigMap()
+
+ if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil {
+ logrus.Info("Meta info configmap already exists, updating it")
+ _, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
+ if err != nil {
+ logrus.Warn("Failed to update existing meta info configmap: ", err)
+ }
+ return
+ }
+
+ _, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
+ if err != nil {
+ logrus.Warn("Failed to create meta info configmap: ", err)
+ }
+}
+
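+// GetNamespaceLabelSelector normalizes the given label filters in place into a
+// single apimachinery selector string; e.g. "team:*" becomes "team" (key must
+// exist) and "team:alpha" becomes "team=alpha".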
+func GetNamespaceLabelSelector(slice []string) (string, error) {
+ for i, kv := range slice {
+ // Legacy support for ":" as a delimiter and "*" for wildcard.
+ if strings.Contains(kv, ":") {
+ split := strings.Split(kv, ":")
+ if split[1] == "*" {
+ slice[i] = split[0]
+ } else {
+ slice[i] = split[0] + "=" + split[1]
+ }
+ }
+ // Convert wildcard to valid apimachinery operator
+ if strings.Contains(kv, "=") {
+ split := strings.Split(kv, "=")
+ if split[1] == "*" {
+ slice[i] = split[0]
+ }
+ }
+ }
+
+ namespaceLabelSelector := strings.Join(slice, ",")
+ if _, err := labels.Parse(namespaceLabelSelector); err != nil {
+ return "", err
+ }
+
+ return namespaceLabelSelector, nil
+}
+
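+// GetResourceLabelSelector applies the same in-place normalization as
+// GetNamespaceLabelSelector to the configmap/secret label filters.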
+func GetResourceLabelSelector(slice []string) (string, error) {
+ for i, kv := range slice {
+ // Legacy support for ":" as a delimiter and "*" for wildcard.
+ if strings.Contains(kv, ":") {
+ split := strings.Split(kv, ":")
+ if split[1] == "*" {
+ slice[i] = split[0]
+ } else {
+ slice[i] = split[0] + "=" + split[1]
+ }
+ }
+ // Convert wildcard to valid apimachinery operator
+ if strings.Contains(kv, "=") {
+ split := strings.Split(kv, "=")
+ if split[1] == "*" {
+ slice[i] = split[0]
+ }
+ }
+ }
+
+ resourceLabelSelector := strings.Join(slice, ",")
+ if _, err := labels.Parse(resourceLabelSelector); err != nil {
+ return "", err
+ }
+
+ return resourceLabelSelector, nil
+}
+
+// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options.
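+// A minimal call-site sketch (annotation maps come from the workload under evaluation):
+//
+//	result := ShouldReload(cfg, "Deployment", annotations, podAnnotations, GetCommandLineOptions())
+//	if result.ShouldReload {
+//		// trigger the configured reload strategy
+//	}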
+func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
+
+ // Check if this workload type should be ignored
+ if len(options.WorkloadTypesToIgnore) > 0 {
+ ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
+ if err != nil {
+ logrus.Errorf("Failed to parse ignored workload types: %v", err)
+ } else {
+ // Map Kubernetes resource types to CLI-friendly names for comparison
+ var resourceToCheck string
+ switch resourceType {
+ case "Job":
+ resourceToCheck = "jobs"
+ case "CronJob":
+ resourceToCheck = "cronjobs"
+ default:
+ resourceToCheck = resourceType // For other types, use as-is
+ }
+
+ // Check if current resource type should be ignored
+ if ignoredWorkloadTypes.Contains(resourceToCheck) {
+ return ReloadCheckResult{
+ ShouldReload: false,
+ }
+ }
+ }
+ }
+
+ ignoreResourceAnnotationValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation]
+ if ignoreResourceAnnotationValue == "true" {
+ return ReloadCheckResult{
+ ShouldReload: false,
+ }
+ }
+
+ annotationValue, found := annotations[config.Annotation]
+ searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
+ reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
+ typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
+ excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
+ excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
+ excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[options.SecretProviderClassExcludeReloaderAnnotation]
+
+ if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
+ annotations = podAnnotations
+ annotationValue = annotations[config.Annotation]
+ searchAnnotationValue = annotations[options.AutoSearchAnnotation]
+ reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
+ typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
+ }
+
+ isResourceExcluded := false
+
+ switch config.Type {
+ case constants.ConfigmapEnvVarPostfix:
+ if foundExcludeConfigmap {
+ isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
+ }
+ case constants.SecretEnvVarPostfix:
+ if foundExcludeSecret {
+ isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
+ }
+ case constants.SecretProviderClassEnvVarPostfix:
+ if foundExcludeSecretProviderClass {
+ isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretProviderClassAnnotationValue)
+ }
+ }
+
+ if isResourceExcluded {
+ return ReloadCheckResult{
+ ShouldReload: false,
+ }
+ }
+
+ reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
+ typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
+ if reloaderEnabled || typedAutoAnnotationEnabled || (reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll) {
+ return ReloadCheckResult{
+ ShouldReload: true,
+ AutoReload: true,
+ }
+ }
+
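+ // Each comma-separated annotation value is treated as an anchored regular
+ // expression and matched against the resource name.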
+ values := strings.Split(annotationValue, ",")
+ for _, value := range values {
+ value = strings.TrimSpace(value)
+ re := regexp.MustCompile("^" + value + "$")
+ if re.MatchString(config.ResourceName) {
+ return ReloadCheckResult{
+ ShouldReload: true,
+ AutoReload: false,
+ }
+ }
+ }
+
+ if searchAnnotationValue == "true" {
+ matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
+ if matchAnnotationValue == "true" {
+ return ReloadCheckResult{
+ ShouldReload: true,
+ AutoReload: true,
+ }
+ }
+ }
+
+ return ReloadCheckResult{
+ ShouldReload: false,
+ }
+}
+
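+// checkIfResourceIsExcluded reports whether resourceName appears in the
+// comma-separated excludedResources list.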
+func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
+ if excludedResources == "" {
+ return false
+ }
+
+ excludedResourcesList := strings.Split(excludedResources, ",")
+ for _, excludedResource := range excludedResourcesList {
+ if strings.TrimSpace(excludedResource) == resourceName {
+ return true
+ }
+ }
+
+ return false
+}
+
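+// init populates CommandLineOptions at package load time so the singleton is
+// never nil.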
+func init() {
+ GetCommandLineOptions()
+}
+
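+// GetCommandLineOptions snapshots the current values from the options package
+// into the CommandLineOptions singleton and returns it.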
+func GetCommandLineOptions() *ReloaderOptions {
+ if CommandLineOptions == nil {
+ CommandLineOptions = &ReloaderOptions{}
+ }
+
+ CommandLineOptions.AutoReloadAll = options.AutoReloadAll
+ CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation
+ CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation
+ CommandLineOptions.SecretProviderClassUpdateOnChangeAnnotation = options.SecretProviderClassUpdateOnChangeAnnotation
+ CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation
+ CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation
+ CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation
+ CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation
+ CommandLineOptions.SecretProviderClassReloaderAutoAnnotation = options.SecretProviderClassReloaderAutoAnnotation
+ CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation
+ CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation
+ CommandLineOptions.SecretProviderClassExcludeReloaderAnnotation = options.SecretProviderClassExcludeReloaderAnnotation
+ CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation
+ CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation
+ CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation
+ CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation
+ CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation
+ CommandLineOptions.LogFormat = options.LogFormat
+ CommandLineOptions.LogLevel = options.LogLevel
+ CommandLineOptions.ReloadStrategy = options.ReloadStrategy
+ CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart
+ CommandLineOptions.EnableHA = options.EnableHA
+ CommandLineOptions.EnableCSIIntegration = options.EnableCSIIntegration
+ CommandLineOptions.WebhookUrl = options.WebhookUrl
+ CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
+ CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore
+ CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
+ CommandLineOptions.ResourceSelectors = options.ResourceSelectors
+ CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
+ CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts)
+ CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
+ CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
+ CommandLineOptions.EnablePProf = options.EnablePProf
+ CommandLineOptions.PProfAddr = options.PProfAddr
+
+ return CommandLineOptions
+}
+
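+// parseBool wraps strconv.ParseBool, treating empty or malformed values as false.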
+func parseBool(value string) bool {
+ if value == "" {
+ return false
+ }
+ result, err := strconv.ParseBool(value)
+ if err != nil {
+ return false // Default to false if parsing fails
+ }
+ return result
+}
diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go
new file mode 100644
index 0000000..532d3ad
--- /dev/null
+++ b/pkg/common/common_test.go
@@ -0,0 +1,224 @@
+package common
+
+import (
+ "testing"
+
+ "github.com/stakater/Reloader/internal/pkg/options"
+)
+
+func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) {
+ // Save original state
+ originalWorkloadTypes := options.WorkloadTypesToIgnore
+ defer func() {
+ options.WorkloadTypesToIgnore = originalWorkloadTypes
+ }()
+
+ tests := []struct {
+ name string
+ ignoredWorkloadTypes []string
+ resourceType string
+ shouldReload bool
+ description string
+ }{
+ {
+ name: "Jobs ignored - Job should not reload",
+ ignoredWorkloadTypes: []string{"jobs"},
+ resourceType: "Job",
+ shouldReload: false,
+ description: "When jobs are ignored, Job resources should not be reloaded",
+ },
+ {
+ name: "Jobs ignored - CronJob should reload",
+ ignoredWorkloadTypes: []string{"jobs"},
+ resourceType: "CronJob",
+ shouldReload: true,
+ description: "When jobs are ignored, CronJob resources should still be processed",
+ },
+ {
+ name: "CronJobs ignored - CronJob should not reload",
+ ignoredWorkloadTypes: []string{"cronjobs"},
+ resourceType: "CronJob",
+ shouldReload: false,
+ description: "When cronjobs are ignored, CronJob resources should not be reloaded",
+ },
+ {
+ name: "CronJobs ignored - Job should reload",
+ ignoredWorkloadTypes: []string{"cronjobs"},
+ resourceType: "Job",
+ shouldReload: true,
+ description: "When cronjobs are ignored, Job resources should still be processed",
+ },
+ {
+ name: "Both ignored - Job should not reload",
+ ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
+ resourceType: "Job",
+ shouldReload: false,
+ description: "When both are ignored, Job resources should not be reloaded",
+ },
+ {
+ name: "Both ignored - CronJob should not reload",
+ ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
+ resourceType: "CronJob",
+ shouldReload: false,
+ description: "When both are ignored, CronJob resources should not be reloaded",
+ },
+ {
+ name: "Both ignored - Deployment should reload",
+ ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
+ resourceType: "Deployment",
+ shouldReload: true,
+ description: "When both are ignored, other workload types should still be processed",
+ },
+ {
+ name: "None ignored - Job should reload",
+ ignoredWorkloadTypes: []string{},
+ resourceType: "Job",
+ shouldReload: true,
+ description: "When nothing is ignored, all workload types should be processed",
+ },
+ {
+ name: "None ignored - CronJob should reload",
+ ignoredWorkloadTypes: []string{},
+ resourceType: "CronJob",
+ shouldReload: true,
+ description: "When nothing is ignored, all workload types should be processed",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Set the ignored workload types
+ options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes
+
+ // Create minimal test config and options
+ config := Config{
+ ResourceName: "test-resource",
+ Annotation: "configmap.reloader.stakater.com/reload",
+ }
+
+ annotations := Map{
+ "configmap.reloader.stakater.com/reload": "test-config",
+ }
+
+ // Create ReloaderOptions with the ignored workload types
+ opts := &ReloaderOptions{
+ WorkloadTypesToIgnore: tt.ignoredWorkloadTypes,
+ AutoReloadAll: true, // Enable auto-reload to simplify test
+ ReloaderAutoAnnotation: "reloader.stakater.com/auto",
+ }
+
+ // Call ShouldReload
+ result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)
+
+ // Check the result
+ if result.ShouldReload != tt.shouldReload {
+ t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v",
+ tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload)
+ }
+
+ t.Logf("β %s", tt.description)
+ })
+ }
+}
+
+func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) {
+ // Save original state
+ originalWorkloadTypes := options.WorkloadTypesToIgnore
+ defer func() {
+ options.WorkloadTypesToIgnore = originalWorkloadTypes
+ }()
+
+ // Test with invalid workload type - should still continue processing
+ options.WorkloadTypesToIgnore = []string{"invalid"}
+
+ config := Config{
+ ResourceName: "test-resource",
+ Annotation: "configmap.reloader.stakater.com/reload",
+ }
+
+ annotations := Map{
+ "configmap.reloader.stakater.com/reload": "test-config",
+ }
+
+ opts := &ReloaderOptions{
+ WorkloadTypesToIgnore: []string{"invalid"},
+ AutoReloadAll: true, // Enable auto-reload to simplify test
+ ReloaderAutoAnnotation: "reloader.stakater.com/auto",
+ }
+
+ // Should not panic and should continue with normal processing
+ result := ShouldReload(config, "Job", annotations, Map{}, opts)
+
+ // Since validation failed, it should continue with normal processing (should reload)
+ if !result.ShouldReload {
+ t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload)
+ }
+}
+
+// TestShouldReload_IssueRBACPermissionFixed validates the fix for issue #996.
+func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) {
+ // Save original state
+ originalWorkloadTypes := options.WorkloadTypesToIgnore
+ defer func() {
+ options.WorkloadTypesToIgnore = originalWorkloadTypes
+ }()
+
+ tests := []struct {
+ name string
+ ignoredWorkloadTypes []string
+ resourceType string
+ description string
+ }{
+ {
+ name: "Issue #996 - ignoreJobs prevents Job processing",
+ ignoredWorkloadTypes: []string{"jobs"},
+ resourceType: "Job",
+ description: "Job resources are skipped entirely, preventing RBAC permission errors",
+ },
+ {
+ name: "Issue #996 - ignoreCronJobs prevents CronJob processing",
+ ignoredWorkloadTypes: []string{"cronjobs"},
+ resourceType: "CronJob",
+ description: "CronJob resources are skipped entirely, preventing RBAC permission errors",
+ },
+ {
+ name: "Issue #996 - both ignored prevent both types",
+ ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
+ resourceType: "Job",
+ description: "Job resources are skipped entirely when both types are ignored",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Set the ignored workload types
+ options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes
+
+ config := Config{
+ ResourceName: "test-resource",
+ Annotation: "configmap.reloader.stakater.com/reload",
+ }
+
+ annotations := Map{
+ "configmap.reloader.stakater.com/reload": "test-config",
+ }
+
+ opts := &ReloaderOptions{
+ WorkloadTypesToIgnore: tt.ignoredWorkloadTypes,
+ AutoReloadAll: true, // Enable auto-reload to simplify test
+ ReloaderAutoAnnotation: "reloader.stakater.com/auto",
+ }
+
+ // Call ShouldReload
+ result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)
+
+ // Should not reload when workload type is ignored
+ if result.ShouldReload {
+ t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v",
+ tt.resourceType, result.ShouldReload)
+ }
+
+ t.Logf("β %s", tt.description)
+ })
+ }
+}
diff --git a/internal/pkg/util/config.go b/pkg/common/config.go
similarity index 83%
rename from internal/pkg/util/config.go
rename to pkg/common/config.go
index 6d6ff21..4421fa5 100644
--- a/internal/pkg/util/config.go
+++ b/pkg/common/config.go
@@ -1,8 +1,9 @@
-package util
+package common
import (
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
+ "github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
)
@@ -16,6 +17,7 @@ type Config struct {
TypedAutoAnnotation string
SHAValue string
Type string
+ Labels map[string]string
}
// GetConfigmapConfig provides utility config for configmap
@@ -26,8 +28,9 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
ResourceAnnotations: configmap.Annotations,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
- SHAValue: GetSHAfromConfigmap(configmap),
+ SHAValue: util.GetSHAfromConfigmap(configmap),
Type: constants.ConfigmapEnvVarPostfix,
+ Labels: configmap.Labels,
}
}
@@ -39,8 +42,9 @@ func GetSecretConfig(secret *v1.Secret) Config {
ResourceAnnotations: secret.Annotations,
Annotation: options.SecretUpdateOnChangeAnnotation,
TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
- SHAValue: GetSHAfromSecret(secret.Data),
+ SHAValue: util.GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
+ Labels: secret.Labels,
}
}
@@ -52,7 +56,7 @@ func GetSecretProviderClassPodStatusConfig(podStatus *csiv1.SecretProviderClassP
ResourceName: podStatus.Status.SecretProviderClassName,
Annotation: options.SecretProviderClassUpdateOnChangeAnnotation,
TypedAutoAnnotation: options.SecretProviderClassReloaderAutoAnnotation,
- SHAValue: GetSHAfromSecretProviderClassPodStatus(podStatus.Status),
+ SHAValue: util.GetSHAfromSecretProviderClassPodStatus(podStatus.Status),
Type: constants.SecretProviderClassEnvVarPostfix,
}
}
diff --git a/pkg/common/metainfo.go b/pkg/common/metainfo.go
new file mode 100644
index 0000000..b792c52
--- /dev/null
+++ b/pkg/common/metainfo.go
@@ -0,0 +1,129 @@
+package common
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Version, Commit, and BuildDate are set during the build process using the
+// -X linker flag, which injects their values into the binary. They record the
+// release version, commit hash, and build date of the running Reloader binary,
+// which is useful for debugging and for tracking the exact build in use.
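+//
+// A typical way to inject them at build time (illustrative; the repository's
+// actual Makefile invocation may differ) is:
+//
+//	go build -ldflags "\
+//	  -X github.com/stakater/Reloader/pkg/common.Version=v1.2.0 \
+//	  -X github.com/stakater/Reloader/pkg/common.Commit=$(git rev-parse --short HEAD) \
+//	  -X github.com/stakater/Reloader/pkg/common.BuildDate=$(date -u +%Y-%m-%dT%H:%M:%SZ)"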
+var Version = "dev"
+var Commit = "unknown"
+var BuildDate = "unknown"
+
+const (
+ MetaInfoConfigmapName = "reloader-meta-info"
+ MetaInfoConfigmapLabelKey = "reloader.stakater.com/meta-info"
+ MetaInfoConfigmapLabelValue = "reloader-oss"
+)
+
+// MetaInfo contains comprehensive metadata about the Reloader instance.
+// This includes build information, configuration options, and deployment details.
+type MetaInfo struct {
+ // BuildInfo contains information about the build version, commit, and compilation details
+ BuildInfo BuildInfo `json:"buildInfo"`
+ // ReloaderOptions contains all the configuration options and flags used by this Reloader instance
+ ReloaderOptions ReloaderOptions `json:"reloaderOptions"`
+ // DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance
+ DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"`
+}
+
+// BuildInfo contains information about the build and version of the Reloader binary.
+// This includes Go version, release version, commit details, and build timestamp.
+type BuildInfo struct {
+ // GoVersion is the version of Go used to compile the binary
+ GoVersion string `json:"goVersion"`
+ // ReleaseVersion is the version tag or branch of the Reloader release
+ ReleaseVersion string `json:"releaseVersion"`
+ // CommitHash is the Git commit hash of the source code used to build this binary
+ CommitHash string `json:"commitHash"`
+ // CommitTime is the timestamp of the Git commit used to build this binary
+ CommitTime time.Time `json:"commitTime"`
+}
+
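+// NewBuildInfo assembles a BuildInfo from the link-time variables above and
+// the version of Go used at compile time.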
+func NewBuildInfo() *BuildInfo {
+ return &BuildInfo{
+ GoVersion: runtime.Version(),
+ ReleaseVersion: Version,
+ CommitHash: Commit,
+ CommitTime: ParseUTCTime(BuildDate),
+ }
+}
+
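+// ToConfigMap serializes the MetaInfo into the reloader-meta-info ConfigMap,
+// storing each section under its own JSON-encoded data key.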
+func (m *MetaInfo) ToConfigMap() *v1.ConfigMap {
+ return &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: MetaInfoConfigmapName,
+ Namespace: m.DeploymentInfo.Namespace,
+ Labels: map[string]string{
+ MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue,
+ },
+ },
+ Data: map[string]string{
+ "buildInfo": toJson(m.BuildInfo),
+ "reloaderOptions": toJson(m.ReloaderOptions),
+ "deploymentInfo": toJson(m.DeploymentInfo),
+ },
+ }
+}
+
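+// NewMetaInfo is the inverse of ToConfigMap: it rebuilds a MetaInfo from the
+// JSON-encoded data keys of the given ConfigMap. Missing keys are left as
+// zero values.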
+func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) {
+ var buildInfo BuildInfo
+ if val, ok := configmap.Data["buildInfo"]; ok {
+ err := json.Unmarshal([]byte(val), &buildInfo)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err)
+ }
+ }
+
+ var reloaderOptions ReloaderOptions
+ if val, ok := configmap.Data["reloaderOptions"]; ok {
+ err := json.Unmarshal([]byte(val), &reloaderOptions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err)
+ }
+ }
+
+ var deploymentInfo metav1.ObjectMeta
+ if val, ok := configmap.Data["deploymentInfo"]; ok {
+ err := json.Unmarshal([]byte(val), &deploymentInfo)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err)
+ }
+ }
+
+ return &MetaInfo{
+ BuildInfo: buildInfo,
+ ReloaderOptions: reloaderOptions,
+ DeploymentInfo: deploymentInfo,
+ }, nil
+}
+
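+// toJson marshals data to a JSON string, returning an empty string if
+// marshalling fails.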
+func toJson(data interface{}) string {
+ jsonData, err := json.Marshal(data)
+ if err != nil {
+ return ""
+ }
+ return string(jsonData)
+}
+
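+// ParseUTCTime parses an RFC3339 timestamp, returning the zero time for empty
+// or malformed input.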
+func ParseUTCTime(value string) time.Time {
+ if value == "" {
+ return time.Time{} // Return zero time if value is empty
+ }
+ t, err := time.Parse(time.RFC3339, value)
+ if err != nil {
+ return time.Time{} // Return zero time if parsing fails
+ }
+ return t
+}
diff --git a/internal/pkg/util/reload_source.go b/pkg/common/reload_source.go
similarity index 98%
rename from internal/pkg/util/reload_source.go
rename to pkg/common/reload_source.go
index 8344d17..0938261 100644
--- a/internal/pkg/util/reload_source.go
+++ b/pkg/common/reload_source.go
@@ -1,4 +1,4 @@
-package util
+package common
import "time"
diff --git a/ubi-build-files-amd64.txt b/ubi-build-files-amd64.txt
index be576ff..5d586a4 100644
--- a/ubi-build-files-amd64.txt
+++ b/ubi-build-files-amd64.txt
@@ -1,5 +1,4 @@
etc/pki
-root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo
@@ -7,4 +6,4 @@ usr/lib64/ld-linux-x86-64.so.2
usr/lib64/libc.so.6
usr/lib64/libdl.so.2
usr/lib64/libpthread.so.0
-usr/lib64/libm.so.6
\ No newline at end of file
+usr/lib64/libm.so.6
diff --git a/ubi-build-files-arm64.txt b/ubi-build-files-arm64.txt
index 1647f78..8b55d33 100644
--- a/ubi-build-files-arm64.txt
+++ b/ubi-build-files-arm64.txt
@@ -1,5 +1,4 @@
etc/pki
-root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo
@@ -7,4 +6,4 @@ usr/lib/ld-linux-aarch64.so.1
usr/lib64/libc.so.6
usr/lib64/libdl.so.2
usr/lib64/libpthread.so.0
-usr/lib64/libm.so.6
\ No newline at end of file
+usr/lib64/libm.so.6