Compare commits

...

92 Commits

Author SHA1 Message Date
Muhammad Safwan Karim
1e46d44c7c Added pprof flag (#980)
* added pprof flag

* updated chart

* fixed linting and spelling
2025-07-29 15:49:30 +05:00
Muhammad Safwan Karim
49409dce54 Extracted some functions to a public package to reuse them in the gateway (#966)
* separate methods

* basic refactoring

* moved common code to util package to use it in gateway

* common check for argo rollouts

* made code compilable with latest changes on master

* Moved options to separate package and created CommandLineOptions instance that will be in sync with options values.

* reverted extra changes

* initialize CommandLineOptions with default options in module init

* wait for paused at annotation before checking deployment paused

* moved things around to fix things

* reverted unnecessary changes

* reverted rolling_upgrade changes

* reverted extra change
2025-07-29 11:50:02 +05:00
Muhammad Safwan Karim
9039956c32 Merge pull request #970 from stakater/sa-7499-expose-meta-info
Publish a configmap that contains build/version info and reloader flags
2025-07-22 14:46:44 +05:00
Safwan
82eb8d8b87 reverted reloader.yaml deployment name 2025-07-22 14:40:46 +05:00
Safwan
7af0728990 comments resolved 2025-07-22 13:47:50 +05:00
Safwan
d9b36a56b5 Removed is dirty flag 2025-07-21 17:40:50 +05:00
Safwan
dcf4b0d0f6 merged deployment pause related changes 2025-07-21 15:14:30 +05:00
Safwan
b8b7cdb610 Merge branch 'master' into sa-7499-expose-meta-info 2025-07-21 14:28:07 +05:00
Safwan
d2580930e4 reverted testing changes 2025-07-21 14:25:02 +05:00
Safwan
e0150145ec updated PR workflow 2025-07-21 13:50:21 +05:00
Safwan
52ac7e307d skip e2e test 2025-07-21 13:30:00 +05:00
Safwan
e311fe2fff updated pull request workflow 2025-07-21 13:05:25 +05:00
Safwan
dec3410b7f updated workflows 2025-07-21 12:58:11 +05:00
Safwan
1e1f094516 resolved comments and stuff 2025-07-18 19:27:26 +05:00
Muhammad Safwan Karim
a9566fa672 Merge pull request #958 from s10/feature/pause
Add pausing deployments during upgrades
2025-07-18 15:38:33 +05:00
Muhammad Safwan Karim
51ee0a19bb Merge pull request #967 from pinkavaj/pi-k8s-labels-simple
Use labels suggested by Kubernetes and Helm best practices
2025-07-18 15:23:25 +05:00
Safwan
19918e6fa8 fix error 2025-07-17 13:05:40 +05:00
Safwan
95cea97d34 make parsetime exportable 2025-07-17 13:04:47 +05:00
Safwan
d22a0f25de changed types to bool where needed 2025-07-17 13:03:42 +05:00
Safwan
2d2c35fcf4 discard error 2025-07-16 20:09:19 +05:00
Safwan
cf600f7761 type safety where applicable in meta info 2025-07-16 19:37:02 +05:00
Safwan
01f62cf823 renamed struct fields 2025-07-16 18:29:26 +05:00
Safwan
85bd7a075c updated docker file 2025-07-16 18:08:24 +05:00
Safwan
3999edb7a3 Merge branch 'master' into sa-7499-expose-meta-info 2025-07-16 17:33:38 +05:00
Safwan
f69773c588 refactoring 2025-07-15 20:08:23 +05:00
Safwan
8b257a3f0c publish configmap with meta info 2025-07-15 19:16:26 +05:00
Muhammad Safwan Karim
93936d12c1 Merge pull request #933 from yo-ga/fix/specific-namespace
Ignore namespaceSelector when not watching globally
2025-07-11 17:14:19 +05:00
Muhammad Safwan Karim
2d810f9824 Merge pull request #865 from ChristianCiach/job-owner-ref
Add OwnerReference to generated Jobs
2025-07-09 16:22:51 +05:00
Christian Ciach
99c45b3ca3 Add test for CronJob owner references 2025-07-09 12:45:24 +02:00
Christian Ciach
81315adc9b Add conventional annotation to triggered jobs 2025-07-09 12:38:12 +02:00
Christian Ciach
ad0407517d Add OwnerReference to generated Jobs 2025-07-09 12:38:12 +02:00
Vladislav Gusev
cafa14f0e7 Add pausing deployments during upgrades 2025-07-08 16:59:42 +03:00
Muhammad Safwan Karim
5089955691 Merge pull request #928 from jamie-stinson/master
feat: add ignore annotation support for skipping reloads
2025-07-08 14:41:09 +05:00
Jiří Pinkava
379c428283 Use labels suggested by Kubernetes and Helm best practices
This improves compatibility and makes it easier to work with various tools
that rely on the existence of these labels.

Keep the old labels for selectors for backward compatibility.

Also note there is a slight change in the behaviour of the PDB, which might
be backward-incompatible in some rare cases.
2025-07-07 22:28:54 +02:00
Muhammad Safwan Karim
404c3700f7 Update push-helm-chart.yaml (#965)
* Update push-helm-chart.yaml

* empty commit
2025-07-07 17:10:51 +05:00
Muhammad Safwan Karim
df812c555c bump-chart (#964) 2025-07-07 13:32:23 +02:00
Yoga Yu
e8cb97a6d1 Merge branch 'master' into fix/specific-namespace 2025-07-04 10:37:34 +08:00
Jamie
58ad781c0c fix: multiple blank lines lint 2025-07-03 16:04:49 +01:00
Jamie
be09beac29 feat: add ignore annotation support for skipping reloads 2025-07-03 14:06:44 +01:00
Muhammad Safwan Karim
516f9e8bc5 Merge pull request #934 from alexandermarston/support-gchat
feat: support Google Chat as an Alert notification provider
2025-07-03 16:24:26 +05:00
Alex Marston
e69ea41ece Merge branch 'stakater:master' into support-gchat 2025-07-03 11:41:06 +01:00
Muhammad Safwan Karim
c2107cccbb Merge pull request #959 from stakater/msafwankarim-patch-1
Update pull_request_docs.yaml
2025-07-03 15:23:14 +05:00
Muhammad Safwan Karim
7ea10b48ec Update README.md 2025-07-03 15:11:17 +05:00
Muhammad Safwan Karim
a6abbd278c Update pull_request_docs.yaml 2025-07-03 15:10:51 +05:00
Muhammad Safwan Karim
70e58394d1 Update pull_request_docs.yaml 2025-07-03 15:10:28 +05:00
Muhammad Safwan Karim
c807e6deaf Update pull_request_docs.yaml 2025-07-03 15:05:27 +05:00
Muhammad Safwan Karim
52815a4ee1 Update pull_request_docs.yaml 2025-07-03 15:05:03 +05:00
Muhammad Safwan Karim
4679d21e26 Update pull_request_docs.yaml 2025-07-03 14:57:00 +05:00
Muhammad Safwan Karim
172de75f01 Update README.md 2025-07-03 14:46:49 +05:00
Muhammad Safwan Karim
4eaaf2da79 Update pull_request_docs.yaml 2025-07-03 14:42:51 +05:00
Alex Marston
144cc910af Merge branch 'stakater:master' into support-gchat 2025-07-03 09:59:49 +01:00
Muhammad Safwan Karim
6faffdc0cf Merge pull request #956 from stakater/msafwankarim-patch-1
Update Workflow to not push the image
2025-07-03 13:50:43 +05:00
Muhammad Safwan Karim
c5481c6e7b Update pull_request_docs.yaml 2025-07-03 13:20:35 +05:00
Muhammad Safwan Karim
a47d927422 Update README.md 2025-07-03 13:11:47 +05:00
Muhammad Safwan Karim
70e0598833 Update README.md 2025-07-03 13:11:20 +05:00
Muhammad Safwan Karim
85bea39568 Update pull_request_docs.yaml 2025-07-03 13:09:22 +05:00
Muhammad Safwan Karim
97e74ad11b Update pull_request_docs.yaml 2025-07-03 13:04:36 +05:00
Muhammad Safwan Karim
9c77e27b2c Update README.md 2025-07-03 13:01:15 +05:00
Muhammad Safwan Karim
ad8e6f78a0 Update pull_request_docs.yaml 2025-07-03 12:59:04 +05:00
Muhammad Safwan Karim
93ba31821e Update README.md 2025-07-03 12:57:27 +05:00
Muhammad Safwan Karim
adbc22c143 Merge pull request #953 from stakater/renovate/stakater-vale-package-0.x
chore(deps): update dependency stakater/vale-package to v0.0.77
2025-07-02 18:46:34 +05:00
renovate[bot]
b6c6e45b5f chore(deps): update dependency stakater/vale-package to v0.0.77 2025-07-02 13:33:09 +00:00
Muhammad Safwan Karim
3c03be7b8e Merge pull request #929 from stakater/renovate/stakater-vale-package-0.x
chore(deps): update dependency stakater/vale-package to v0.0.65
2025-07-02 18:32:39 +05:00
Muhammad Safwan Karim
1717fe93b1 Merge pull request #952 from videlov/upgrade-optimization
[optimization] Do not re-fetch resource on first attempt on upgrade
2025-07-02 17:55:28 +05:00
Muhammad Safwan Karim
0ff6d0fcf2 Merge pull request #938 from vmizoules/feat(chart)_allow_templating_labels
[helm] Add templating support for additional labels
2025-07-02 16:29:38 +05:00
Vladimir Videlov
f896e1462d Update 2025-07-02 12:20:42 +02:00
Vladimir Videlov
8b0311f799 Rename 2025-07-01 14:39:08 +02:00
Vladimir Videlov
fafb5f45d6 Do not fetch resource on first attempt (already up-to-date from Items func) 2025-07-01 14:27:47 +02:00
Muhammad Safwan Karim
22ee53ae33 Merge pull request #946 from stakater/SA-7383-remove-support-for-dc
Removed code for deployment config SA-7383
2025-06-18 19:44:54 +05:00
Safwan
61ba818d4d removed code for deployment config 2025-06-18 17:35:12 +05:00
Muhammad Safwan Karim
b56f8f5c83 Merge pull request #945 from stakater/msafwankarim-patch-1
Update Chart.yaml
2025-06-18 14:12:30 +05:00
Muhammad Safwan Karim
c021462893 Update Chart.yaml 2025-06-18 14:10:55 +05:00
Muhammad Safwan Karim
1874f871d9 Merge pull request #943 from stakater/msafwankarim-patch-1
Update Chart.yaml
2025-06-18 14:08:45 +05:00
Muhammad Safwan Karim
4741ff231b Update values.yaml 2025-06-18 14:01:53 +05:00
Muhammad Safwan Karim
b166c2bbfa Update values.yaml 2025-06-18 13:58:23 +05:00
Muhammad Safwan Karim
ab0980d6ee Update Chart.yaml 2025-06-18 13:56:42 +05:00
Muhammad Safwan Karim
f9ec2e99a8 Removed patch version from golang in dockerfile (#940)
* Update Dockerfile

* updated golang version
2025-06-18 13:36:24 +05:00
Vincent Mizoules
b06411479f [helm] Fix tpl missing context and remove debugging 2025-06-16 17:44:18 +02:00
Vincent Mizoules
e93731a686 [helm] Add tpl support for additional labels 2025-06-16 17:25:01 +02:00
Alex Marston
5139d65f9c feat: support Google Chat as an Alert notification provider 2025-06-10 14:38:06 +01:00
Yoga Yu
023425d4e1 chore: update doc 2025-06-08 21:54:13 +08:00
Yoga Yu
8c2f2e574c feat: ignore namespaceSelector when not necessary 2025-06-08 21:49:03 +08:00
Yoga Yu
b6d538dca8 fix: ignore filter when in-ns 2025-06-07 01:40:47 +08:00
renovate[bot]
fdfb083b27 chore(deps): update dependency stakater/vale-package to v0.0.65 2025-05-30 16:58:53 +00:00
renovate[bot]
533ba4f7eb chore(deps): update dependency stakater/vale-package to v0.0.62 (#924)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-21 13:40:57 +05:00
Muhammad Safwan Karim
662e1fcd9b Updated readmes (#923) 2025-05-13 20:53:09 +02:00
Felix
54999116c9 Merge pull request #877 from videlov/master
Support for resource patching with retries
2025-05-12 15:29:01 +02:00
Vladimir Videlov
f9561a2167 Fix lint (#2) 2025-05-07 17:49:03 +02:00
Vladimir Videlov
c2d6b95297 Support for resource patching with retries (#1)
* Initial

* State

* State

* Revert unintentional change

* Update

* Update

* Fix tests

* Support patching for both reload strategies

* rolling_upgrade tests

* Update

* More tests

* Remove unnecessary stuff
2025-05-07 16:24:07 +02:00
Tehreem
2312be3d68 Merge pull request #913 from stakater/rm3
add certificate as source
2025-05-02 11:22:49 +05:00
Rasheed Amir
2c82d6507f add certificate as source 2025-05-01 22:43:13 +02:00
Karl Johan Grahn
96dc40c8cb Update Helm chart to 2.1.3 for reloader version v1.4.2 2025-04-30 15:23:58 +05:00
46 changed files with 2908 additions and 575 deletions

View File

@@ -63,6 +63,11 @@ jobs:
check-latest: true
cache: true
- name: Create timestamp
id: prep
run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
# Get highest tag and remove any suffixes with '-'
- name: Get Highest tag
id: highest_tag
@@ -104,6 +109,7 @@ jobs:
kind create cluster
kubectl cluster-info
- name: Test
run: make test
@@ -135,7 +141,12 @@ jobs:
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: false
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
build-args: |
VERSION=merge-${{ steps.generate_tag.outputs.GIT_TAG }}
COMMIT=${{github.event.pull_request.head.sha}}
BUILD_DATE=${{ steps.prep.outputs.created }}
BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |

View File

@@ -16,16 +16,17 @@ on:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.131
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.134
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: docs
MD_LINT_CONFIG: .markdownlint.yaml
build:
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.131
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.134
with:
DOCKER_FILE_PATH: Dockerfile-docs
CONTAINER_REGISTRY_URL: ghcr.io/stakater
PUSH_IMAGE: false
secrets:
CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}

View File

@@ -15,7 +15,7 @@ on:
env:
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
REGISTRY: ghcr.io
REGISTRY: ghcr.io # container registry
jobs:
verify-and-push-helm-chart:

View File

@@ -91,6 +91,10 @@ jobs:
with:
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
- name: Create timestamp
id: prep
run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Generate image repository path for Docker registry
run: |
@@ -148,7 +152,11 @@ jobs:
file: ${{ env.DOCKER_FILE_PATH }}
pull: true
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
build-args: |
VERSION=merge-${{ github.event.number }}
COMMIT=${{ github.sha }}
BUILD_DATE=${{ steps.prep.outputs.created }}
BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |

View File

@@ -79,6 +79,10 @@ jobs:
id: generate_tag
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
- name: Create timestamp
id: prep
run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -106,6 +110,10 @@ jobs:
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
build-args: |
VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
COMMIT=${{ github.sha }}
BUILD_DATE=${{ steps.prep.outputs.created }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -152,6 +160,10 @@ jobs:
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }},${{ env.GHCR_IMAGE_REPOSITORY }}:latest
build-args: |
VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
COMMIT=${{ github.sha }}
BUILD_DATE=${{ steps.prep.outputs.created }}
labels: |
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
org.opencontainers.image.created=${{ steps.prep.outputs.created }}

View File

@@ -1,7 +1,7 @@
StylesPath = styles
MinAlertLevel = warning
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.61/Stakater.zip
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.77/Stakater.zip
Vocab = Stakater
# Only check MarkDown files

View File

@@ -2,13 +2,17 @@ ARG BUILDER_IMAGE
ARG BASE_IMAGE
# Build the manager binary
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.2} AS builder
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.4} AS builder
ARG TARGETOS
ARG TARGETARCH
ARG GOPROXY
ARG GOPRIVATE
ARG COMMIT
ARG VERSION
ARG BUILD_DATE
WORKDIR /workspace
# Copy the Go Modules manifests
@@ -30,7 +34,10 @@ RUN CGO_ENABLED=0 \
GOPROXY=${GOPROXY} \
GOPRIVATE=${GOPRIVATE} \
GO111MODULE=on \
go build -mod=mod -a -o manager main.go
go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \
-X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \
-X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE}" \
-installsuffix 'static' -mod=mod -a -o manager ./
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details

View File

@@ -32,6 +32,7 @@ Reloader bridges that gap by ensuring your workloads stay in sync with configura
flowchart LR
ExternalSecret -->|Creates| Secret
SealedSecret -->|Creates| Secret
Certificate -->|Creates| Secret
Secret -->|Watched by| Reloader
ConfigMap -->|Watched by| Reloader
@@ -44,7 +45,7 @@ flowchart LR
Reloader -->|Sends Notification| Slack,Teams,Webhook
```
- Sources like `ExternalSecret` or `SealedSecret` create or manage your Kubernetes Secrets.
- Sources like `ExternalSecret`, `SealedSecret`, or `Certificate` from `cert-manager` can create or manage Kubernetes `Secrets` — but they can also be created manually or delivered through GitOps workflows.
- `Secrets` and `ConfigMaps` are watched by Reloader.
- When changes are detected, Reloader automatically triggers a rollout of the associated workloads, ensuring your app always runs with the latest configuration.
@@ -152,6 +153,21 @@ This pattern allows fine-grained reload control — workloads only restart if th
1. ✅ You want to reload a workload only if it references a ConfigMap or Secret that has been explicitly tagged with `reloader.stakater.com/match: "true"`.
1. ✅ Use this when you want full control over which shared or system-wide resources trigger reloads. Great in multi-tenant clusters or shared configs.
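As a concrete illustration, a shared ConfigMap opted in to this pattern carries the match annotation directly; a workload then reloads only if it both references this ConfigMap and has opted in on its side. A minimal sketch (the name and data are illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: shared-config                     # illustrative name
  annotations:
    reloader.stakater.com/match: "true"   # only workloads referencing this resource are reloaded
data:
  LOG_LEVEL: "info"                       # illustrative data
```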
### ⛔ Resource-Level Ignore Annotation
When you need to prevent specific ConfigMaps or Secrets from triggering any reloads, use the ignore annotation on the resource itself:
```yaml
apiVersion: v1
kind: ConfigMap # or Secret
metadata:
name: my-config
annotations:
reloader.stakater.com/ignore: "true"
```
This instructs Reloader to skip all reload logic for that resource across all workloads.
### 4. ⚙️ Workload-Specific Rollout Strategy
By default, Reloader uses the **rollout** strategy — it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift.
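As a minimal sketch, the chart's `reloader.reloadStrategy` value (documented in the chart README later in this change) can be switched to the `annotations` strategy for GitOps-managed clusters:
```yaml
# values.yaml (sketch)
reloader:
  reloadStrategy: annotations   # one of: default, env-vars, annotations
```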
@@ -188,7 +204,7 @@ metadata:
Reloader can optionally **send alerts** whenever it triggers a rolling upgrade for a workload (e.g., `Deployment`, `StatefulSet`, etc.).
These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack or Microsoft Teams.
These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack, Microsoft Teams or Google Chat.
To enable this feature, update the `reloader.env.secret` section in your `values.yaml` (when installing via Helm):
@@ -197,11 +213,30 @@ reloader:
env:
secret:
ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
ALERT_SINK: "slack" # Options: slack, teams, webhook (default: webhook)
ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
```
### 7. ⏸️ Pause Deployments
This feature allows you to pause rollouts for a deployment for a specified duration, helping to prevent multiple restarts when several ConfigMaps or Secrets are updated in quick succession.
| Annotation | Applies To | Description |
|---------------------------------------------------------|--------------|-----------------------------------------------------------------------------|
| `deployment.reloader.stakater.com/pause-period: "5m"` | Deployment | Pauses reloads for the specified period (e.g., `5m`, `1h`) |
#### How it works
1. Add the `deployment.reloader.stakater.com/pause-period` annotation to your Deployment, specifying the pause duration (e.g., `"5m"` for five minutes).
1. When a watched ConfigMap or Secret changes, Reloader will still trigger a reload event, but if the deployment is paused, the rollout will have no effect until the pause period has elapsed.
1. This avoids repeated restarts if multiple resources are updated close together.
#### Use when
1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time.
1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes.
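A minimal sketch of a Deployment opted in to the pause behaviour (the workload name, labels, and image are illustrative):
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                                                # illustrative name
  annotations:
    deployment.reloader.stakater.com/pause-period: "5m"    # hold further rollouts for five minutes
spec:
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: web
          image: nginx:1.27                                # placeholder image
```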
## 🚀 Installation
### 1. 📦 Helm
@@ -304,7 +339,7 @@ Reloader supports multiple strategies for triggering rolling updates when a watc
| Flag | Description |
|------|-------------|
| `--namespace-selector=key=value` | Watch only namespaces with matching labels |
| `--namespace-selector='key=value'` <br /> <br />`--namespace-selector='key1=value1,key2=value2'` <br /> <br />`--namespace-selector='key in (value1,value2)'`| Watch only namespaces with matching labels. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label selectors |
| `--namespaces-to-ignore=ns1,ns2` | Skip specific namespaces from being watched |
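When installing via Helm, the equivalent setting is the `reloader.namespaceSelector` value, which (as the chart README notes) only takes effect when `reloader.watchGlobally` is `true`. A sketch with an illustrative selector:
```yaml
# values.yaml (sketch)
reloader:
  watchGlobally: true
  namespaceSelector: "environment in (staging,production)"   # illustrative label selector
```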
#### 4. 📝 Annotation Key Overrides
@@ -320,6 +355,15 @@ These flags allow you to redefine annotation keys used in your workloads or reso
| `--search-match-annotation` | Overrides `reloader.stakater.com/match` |
| `--secret-annotation` | Overrides `secret.reloader.stakater.com/reload` |
| `--configmap-annotation` | Overrides `configmap.reloader.stakater.com/reload` |
| `--pause-deployment-annotation` | Overrides `deployment.reloader.stakater.com/pause-period` |
| `--pause-deployment-time-annotation` | Overrides `deployment.reloader.stakater.com/paused-at` |
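In the Helm chart these overrides map to the `reloader.custom_annotations` values (the deployment template in this change reads `custom_annotations.configmap`, for example). A sketch with a hypothetical replacement key:
```yaml
# values.yaml (sketch)
reloader:
  custom_annotations:
    configmap: "mycompany.com/configmap-reload"   # hypothetical custom annotation key
```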
### 5. 🕷️ Debugging
| Flag | Description |
|--- |-------------|
| `--enable-pprof` | Enables `pprof` for profiling |
| `--pprof-addr` | Address to start `pprof` server on. Default is `:6060` |
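With the Helm chart, the same flags are driven by the new `reloader.enablePProf` and `reloader.pprofAddr` values introduced in this change, e.g.:
```yaml
# values.yaml (sketch)
reloader:
  enablePProf: true
  pprofAddr: ":6060"   # default address, shown explicitly here
```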
## Compatibility

View File

@@ -1,8 +1,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 2.1.2
appVersion: v1.4.1
version: 2.2.0
appVersion: v1.4.5
keywords:
- Reloader
- kubernetes

View File

@@ -52,11 +52,13 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events; works only when `reloadOnCreate` is `true`. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadStrategy` | Strategy to trigger resource restart, set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
| `reloader.ignoreNamespaces` | List of comma separated namespaces to ignore, if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.namespaceSelector` | List of comma separated namespaces to select, if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.namespaceSelector` | List of comma-separated Kubernetes label selectors for namespace selection. This parameter is only used when `reloader.watchGlobally` is `true`. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label selectors | string | `""` |
| `reloader.resourceLabelSelector` | List of comma separated label selectors, if multiple are provided they are combined with the AND operator | string | `""` |
| `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
| `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
| `reloader.legacy.rbac` | | boolean | `false` |
| `reloader.matchLabels` | Pod labels to match | map | `{}` |

View File

@@ -27,12 +27,20 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "reloader-labels.chart" -}}
{{- define "reloader-match-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
{{- end -}}
{{- define "reloader-labels.chart" -}}
{{ include "reloader-match-labels.chart" . }}
app.kubernetes.io/name: {{ template "reloader-name" . }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end -}}
{{/*
@@ -45,10 +53,10 @@ podAntiAffinity:
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
- key: app.kubernetes.io/instance
operator: In
values:
- {{ template "reloader-fullname" . }}
- {{ .Release.Name | quote }}
topologyKey: "kubernetes.io/hostname"
{{- end -}}
@@ -70,3 +78,12 @@ Create the annotations to support helm3
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
{{/*
Create the namespace selector if it does not watch globally
*/}}
{{- define "reloader-namespaceSelector" -}}
{{- if and .Values.reloader.watchGlobally .Values.reloader.namespaceSelector -}}
{{ .Values.reloader.namespaceSelector }}
{{- end -}}
{{- end -}}

View File

@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
rules:
@@ -31,7 +31,7 @@ rules:
- list
- get
- watch
{{- if .Values.reloader.namespaceSelector }}
{{- if (include "reloader-namespaceSelector" .) }}
- apiGroups:
- ""
resources:

View File

@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
roleRef:

View File

@@ -4,15 +4,15 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
{{ tpl (toYaml .Values.reloader.deployment.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -25,24 +25,23 @@ spec:
revisionHistoryLimit: {{ .Values.reloader.deployment.revisionHistoryLimit }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
release: {{ .Release.Name | quote }}
{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}
template:
metadata:
{{- if .Values.reloader.deployment.pod.annotations }}
annotations:
{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
{{ tpl (toYaml .Values.reloader.deployment.pod.annotations) . | indent 8 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 8 }}
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 8 }}
{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 8 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 8 }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
@@ -144,6 +143,15 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
@@ -198,7 +206,7 @@ spec:
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -215,12 +223,18 @@ spec:
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.namespaceSelector }}
- "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
{{- if (include "reloader-namespaceSelector" .) }}
- "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\""
{{- end }}
{{- if .Values.reloader.resourceLabelSelector }}
- "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
{{- end }}
{{- if .Values.reloader.enablePProf }}
- "--enable-pprof"
{{- if and .Values.reloader.pprofAddr }}
- "--pprof-addr={{ .Values.reloader.pprofAddr }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"

View File

@@ -7,17 +7,16 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
podSelector:
matchLabels:
app: {{ template "reloader-fullname" . }}
release: {{ .Release.Name | quote }}
{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}
policyTypes:
- Ingress

View File

@@ -13,5 +13,5 @@ spec:
{{- end }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -101,3 +101,34 @@ rules:
- create
- patch
{{- end }}
---
{{- if .Values.reloader.rbac.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-metadata-role
namespace: {{ .Values.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
{{- end }}

View File

@@ -11,10 +11,10 @@ metadata:
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -27,3 +27,30 @@ subjects:
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
---
{{- if .Values.reloader.rbac.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-metadata-role-binding
namespace: {{ .Values.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "reloader-fullname" . }}-metadata-role
subjects:
- kind: ServiceAccount
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

View File

@@ -5,22 +5,22 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
{{ tpl (toYaml .Values.reloader.service.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.service.labels }}
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.service.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
ports:
- port: {{ .Values.reloader.service.port }}

View File

@@ -11,15 +11,15 @@ metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.annotations }}
{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
{{ tpl (toYaml .Values.reloader.serviceAccount.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceAccount.labels }}
{{ toYaml .Values.reloader.serviceAccount.labels | indent 4 }}
{{ tpl (toYaml .Values.reloader.serviceAccount.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}

View File

@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

View File

@@ -17,7 +17,7 @@ fullnameOverride: ""
image:
name: stakater/reloader
repository: ghcr.io/stakater/reloader
tag: v1.4.1
tag: v1.4.5
# digest: sha256:1234567
pullPolicy: IfNotPresent
@@ -41,6 +41,10 @@ reloader:
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
# Set to true to enable pprof for profiling
enablePProf: false
# Address to start pprof server on. Default is ":6060"
pprofAddr: ":6060"
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
@@ -99,14 +103,14 @@ reloader:
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app: my-app
# app.kubernetes.io/instance: my-app
topologySpreadConstraints: []
annotations: {}
labels:
provider: stakater
group: com.stakater.platform
version: v1.4.1
version: v1.4.5
# Support for extra environment variables.
env:
# Open supports Key value pair as environment variables.

View File

@@ -6,3 +6,4 @@ resources:
- manifests/clusterrolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
- manifests/role.yaml

View File

@@ -31,6 +31,13 @@ spec:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
ports:
- name: http
containerPort: 9090

View File

@@ -0,0 +1,32 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: reloader-reloader-metadata-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: reloader-reloader-metadata-rolebinding
namespace: default
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
roleRef:
kind: Role
name: reloader-reloader-metadata-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -5,6 +5,23 @@ metadata:
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: reloader-reloader-metadata-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: reloader-reloader-role
@@ -64,6 +81,20 @@ rules:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: reloader-reloader-metadata-rolebinding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: reloader-reloader-metadata-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: reloader-reloader-role-binding
@@ -104,7 +135,13 @@ spec:
resourceFieldRef:
divisor: "1"
resource: limits.memory
image: "ghcr.io/stakater/reloader:latest"
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
image: ghcr.io/stakater/reloader:latest
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -8,7 +8,7 @@ In-order to enable this feature, you need to update the `reloader.env.secret` se
```yaml
ALERT_ON_RELOAD: [ true/false ] Default: false
ALERT_SINK: [ slack/teams/webhook ] Default: webhook
ALERT_SINK: [ slack/teams/gchat/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
```

go.mod (3 changed lines)
View File

@@ -1,6 +1,6 @@
module github.com/stakater/Reloader
go 1.24.2
go 1.24.4
require (
github.com/argoproj/argo-rollouts v1.8.2
@@ -38,7 +38,6 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect

go.sum (18 changed lines)
View File

@@ -76,12 +76,8 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/api v0.0.0-20250331192611-6179881b782d h1:Vadr+xFmNi6RzWRTJtqMQJv1hiUe7as1rV2svKQffoY=
github.com/openshift/api v0.0.0-20250331192611-6179881b782d/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I=
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
github.com/openshift/client-go v0.0.0-20250330132942-bc2e3c2af6e1 h1:9SaT0p5FsRDvz4STV1VnxMyfXXzAXv1PubZ0nczzDYk=
github.com/openshift/client-go v0.0.0-20250330132942-bc2e3c2af6e1/go.mod h1:6a0Hj32FrkokKMeTck1uStmNV0wHYv46dHWAWER5iis=
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ=
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo=
github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI=
@@ -91,12 +87,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
@@ -137,12 +129,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -152,18 +140,12 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=

View File

@@ -35,6 +35,8 @@ func SendWebhookAlert(msg string) {
sendSlackAlert(webhook_url, webhook_proxy, msg)
} else if alert_sink == "teams" {
sendTeamsAlert(webhook_url, webhook_proxy, msg)
} else if alert_sink == "gchat" {
sendGoogleChatAlert(webhook_url, webhook_proxy, msg)
} else {
msg = strings.Replace(msg, "*", "", -1)
sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
@@ -98,6 +100,29 @@ func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error {
return nil
}
// function to send alert to Google Chat webhook
func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error {
payload := map[string]interface{}{
"text": msg,
}
request := gorequest.New().Proxy(proxy)
resp, _, err := request.
Post(webhookUrl).
RedirectPolicy(redirectPolicy).
Send(payload).
End()
if err != nil {
return err
}
if resp.StatusCode != 200 {
return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
}
return nil
}
// function to send alert to webhook service as text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
request := gorequest.New().Proxy(proxy)

View File

@@ -2,6 +2,7 @@ package callbacks
import (
"context"
"errors"
"fmt"
"time"
@@ -15,10 +16,14 @@ import (
"k8s.io/apimachinery/pkg/runtime"
patchtypes "k8s.io/apimachinery/pkg/types"
"maps"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
openshiftv1 "github.com/openshift/api/apps/v1"
)
// ItemFunc is a generic function to return a specific resource in given namespace
type ItemFunc func(kube.Clients, string, string) (runtime.Object, error)
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object
@@ -34,6 +39,12 @@ type VolumesFunc func(runtime.Object) []v1.Volume
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error
// PatchFunc performs the resource patch
type PatchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
// PatchTemplateFunc is a generic func to return strategic merge JSON patch template
type PatchTemplatesFunc func() PatchTemplates
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string
@@ -42,14 +53,42 @@ type PodAnnotationsFunc func(runtime.Object) map[string]string
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
ItemsFunc ItemsFunc
AnnotationsFunc AnnotationsFunc
PodAnnotationsFunc PodAnnotationsFunc
ContainersFunc ContainersFunc
InitContainersFunc InitContainersFunc
UpdateFunc UpdateFunc
VolumesFunc VolumesFunc
ResourceType string
ItemFunc ItemFunc
ItemsFunc ItemsFunc
AnnotationsFunc AnnotationsFunc
PodAnnotationsFunc PodAnnotationsFunc
ContainersFunc ContainersFunc
ContainerPatchPathFunc ContainersFunc
InitContainersFunc InitContainersFunc
UpdateFunc UpdateFunc
PatchFunc PatchFunc
PatchTemplatesFunc PatchTemplatesFunc
VolumesFunc VolumesFunc
ResourceType string
SupportsPatch bool
}
// PatchTemplates contains merge JSON patch templates
type PatchTemplates struct {
AnnotationTemplate string
EnvVarTemplate string
DeleteEnvVarTemplate string
}
// GetDeploymentItem returns the deployment in given namespace
func GetDeploymentItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
deployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get deployment %v", err)
return nil, err
}
if deployment.Spec.Template.ObjectMeta.Annotations == nil {
annotations := make(map[string]string)
deployment.Spec.Template.ObjectMeta.Annotations = annotations
}
return deployment, nil
}
// GetDeploymentItems returns the deployments in given namespace
@@ -72,6 +111,17 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
return items
}
// GetCronJobItem returns the job in given namespace
func GetCronJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
cronjob, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get cronjob %v", err)
return nil, err
}
return cronjob, nil
}
// GetCronJobItems returns the jobs in given namespace
func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -92,6 +142,17 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
// GetJobItem returns the job in given namespace
func GetJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
job, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get job %v", err)
return nil, err
}
return job, nil
}
// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -112,6 +173,17 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
// GetDaemonSetItem returns the daemonSet in given namespace
func GetDaemonSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
daemonSet, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get daemonSet %v", err)
return nil, err
}
return daemonSet, nil
}
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -131,6 +203,17 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
return items
}
// GetStatefulSetItem returns the statefulSet in given namespace
func GetStatefulSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
statefulSet, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get statefulSet %v", err)
return nil, err
}
return statefulSet, nil
}
// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -150,23 +233,15 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
return items
}
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
// GetRolloutItem returns the rollout in given namespace
func GetRolloutItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
rollout, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to list deploymentConfigs %v", err)
logrus.Errorf("Failed to get Rollout %v", err)
return nil, err
}
items := make([]runtime.Object, len(deploymentConfigs.Items))
// Ensure we always have pod annotations to add to
for i, v := range deploymentConfigs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
items[i] = &deploymentConfigs.Items[i]
}
return items
return rollout, nil
}
// GetRolloutItems returns the rollouts in given namespace
@@ -190,71 +265,97 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.Deployment).ObjectMeta.Annotations == nil {
item.(*appsv1.Deployment).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.Deployment).ObjectMeta.Annotations
}
// GetCronJobAnnotations returns the annotations of given cronjob
func GetCronJobAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.CronJob).ObjectMeta.Annotations == nil {
item.(*batchv1.CronJob).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*batchv1.CronJob).ObjectMeta.Annotations
}
// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.Job).ObjectMeta.Annotations == nil {
item.(*batchv1.Job).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*batchv1.Job).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.DaemonSet).ObjectMeta.Annotations == nil {
item.(*appsv1.DaemonSet).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}
// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.StatefulSet).ObjectMeta.Annotations == nil {
item.(*appsv1.StatefulSet).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}
// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}
// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item runtime.Object) map[string]string {
if item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations = make(map[string]string)
}
return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}
// GetCronJobPodAnnotations returns the pod's annotations of given cronjob
func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}
// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
if item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations == nil {
item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
if item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations == nil {
item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}
// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
if item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations == nil {
item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}
@@ -283,11 +384,6 @@ func GetStatefulSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}
// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
@@ -318,16 +414,20 @@ func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
// GetDeploymentConfigInitContainers returns the init containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}
// GetRolloutInitContainers returns the init containers of given rollout
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}
// GetPatchTemplates returns patch templates
func GetPatchTemplates() PatchTemplates {
return PatchTemplates{
AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, // strategic merge patch
EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, // strategic merge patch
DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, // JSON patch
}
}
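For orientation, the first two templates are strategic-merge-patch bodies and the third is an RFC 6902 JSON patch addressed by container and env-var indexes. A minimal sketch of how a caller might render and apply them from within this package; the annotation key, env-var name, and indexes below are illustrative, not taken from this change:
templates := GetPatchTemplates()
// Strategic merge patch that sets an (illustrative) pod-template annotation.
annotationPatch := fmt.Appendf(nil, templates.AnnotationTemplate, "example.reloader/annotation", "new-sha")
// Strategic merge patch that upserts an env var on a named container.
envPatch := fmt.Appendf(nil, templates.EnvVarTemplate, "app-container", "STAKATER_EXAMPLE_CONFIGMAP", "new-sha")
// JSON patch that removes env entry 0 from container 0.
deletePatch := fmt.Appendf(nil, templates.DeleteEnvVarTemplate, 0, 0)
// Each body is then applied through the matching Patch* callback with the right patch type, e.g.:
// err := PatchDeployment(clients, namespace, deployment, patchtypes.StrategicMergePatchType, annotationPatch)
_, _, _ = annotationPatch, envPatch, deletePatch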
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
deployment := resource.(*appsv1.Deployment)
@@ -335,18 +435,39 @@ func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.O
return err
}
// PatchDeployment performs rolling upgrade on deployment
func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
deployment := resource.(*appsv1.Deployment)
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
return err
}
// CreateJobFromCronjob creates a Job from the given cronjob's job template to trigger a reload
func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error {
cronJob := resource.(*batchv1.CronJob)
annotations := make(map[string]string)
annotations["cronjob.kubernetes.io/instantiate"] = "manual"
maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations)
job := &batchv1.Job{
ObjectMeta: meta_v1.ObjectMeta{
GenerateName: cronJob.Name + "-",
Namespace: cronJob.Namespace,
Annotations: annotations,
Labels: cronJob.Spec.JobTemplate.Labels,
OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))},
},
Spec: cronJob.Spec.JobTemplate.Spec,
}
_, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
return err
}
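Because the generated Job carries a controller owner reference to its CronJob, it is garbage-collected together with the CronJob. A quick, hypothetical way to confirm the relationship from client code (the job name is assumed, not taken from this change):
createdJob, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), someJobName, meta_v1.GetOptions{})
if err == nil && meta_v1.IsControlledBy(createdJob, cronJob) {
    logrus.Infof("Job %s is controlled by CronJob %s", createdJob.Name, cronJob.Name)
}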
func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
return errors.New("not supported patching: CronJob")
}
// ReCreateJobFromjob re-creates the given job to trigger a reload
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
oldJob := resource.(*batchv1.Job)
@@ -379,6 +500,10 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
return err
}
func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
return errors.New("not supported patching: Job")
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
daemonSet := resource.(*appsv1.DaemonSet)
@@ -386,6 +511,12 @@ func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Ob
return err
}
func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
daemonSet := resource.(*appsv1.DaemonSet)
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
return err
}
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
statefulSet := resource.(*appsv1.StatefulSet)
@@ -393,18 +524,17 @@ func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.
return err
}
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
return err
}
func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
statefulSet := resource.(*appsv1.StatefulSet)
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
return err
}
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
rollout := resource.(*argorolloutv1alpha1.Rollout)
strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
var err error
switch options.ToArgoRolloutStrategy(strategy) {
case options.RestartStrategy:
_, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
@@ -414,6 +544,10 @@ func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Obje
return err
}
func PatchRollout(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
return errors.New("not supported patching: Rollout")
}
// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
@@ -439,11 +573,6 @@ func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}
// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes


@@ -3,6 +3,7 @@ package callbacks_test
import (
"context"
"fmt"
"strings"
"testing"
"time"
@@ -10,7 +11,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@@ -18,6 +19,7 @@ import (
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
patchtypes "k8s.io/apimachinery/pkg/types"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/options"
@@ -93,7 +95,7 @@ func TestUpdateRollout(t *testing.T) {
t.Errorf("updating rollout: %v", err)
}
rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
namespace).Get(context.TODO(), rollout.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("getting rollout: %v", err)
@@ -111,6 +113,71 @@ func TestUpdateRollout(t *testing.T) {
}
}
func TestPatchRollout(t *testing.T) {
namespace := "test-ns"
rollout := testutil.GetRollout(namespace, "test", map[string]string{options.RolloutStrategyAnnotation: ""})
err := callbacks.PatchRollout(clients, namespace, rollout, patchtypes.StrategicMergePatchType, []byte(`{"spec": {}}`))
assert.EqualError(t, err, "not supported patching: Rollout")
}
func TestResourceItem(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
getItemFunc func(kube.Clients, string, string) (runtime.Object, error)
deleteFunc func(kube.Clients, string, string) error
}{
{
name: "Deployment",
createFunc: createTestDeploymentWithAnnotations,
getItemFunc: callbacks.GetDeploymentItem,
deleteFunc: deleteTestDeployment,
},
{
name: "CronJob",
createFunc: createTestCronJobWithAnnotations,
getItemFunc: callbacks.GetCronJobItem,
deleteFunc: deleteTestCronJob,
},
{
name: "Job",
createFunc: createTestJobWithAnnotations,
getItemFunc: callbacks.GetJobItem,
deleteFunc: deleteTestJob,
},
{
name: "DaemonSet",
createFunc: createTestDaemonSetWithAnnotations,
getItemFunc: callbacks.GetDaemonSetItem,
deleteFunc: deleteTestDaemonSet,
},
{
name: "StatefulSet",
createFunc: createTestStatefulSetWithAnnotations,
getItemFunc: callbacks.GetStatefulSetItem,
deleteFunc: deleteTestStatefulSet,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resource, err := tt.createFunc(clients, fixtures.namespace, "1")
assert.NoError(t, err)
accessor, err := meta.Accessor(resource)
assert.NoError(t, err)
_, err = tt.getItemFunc(clients, accessor.GetName(), fixtures.namespace)
assert.NoError(t, err)
err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
assert.NoError(t, err)
})
}
}
func TestResourceItems(t *testing.T) {
fixtures := newTestFixtures()
@@ -118,36 +185,42 @@ func TestResourceItems(t *testing.T) {
name string
createFunc func(kube.Clients, string) error
getItemsFunc func(kube.Clients, string) []runtime.Object
deleteFunc func(kube.Clients, string) error
expectedCount int
}{
{
name: "Deployments",
createFunc: createTestDeployments,
getItemsFunc: callbacks.GetDeploymentItems,
deleteFunc: deleteTestDeployments,
expectedCount: 2,
},
{
name: "CronJobs",
createFunc: createTestCronJobs,
getItemsFunc: callbacks.GetCronJobItems,
deleteFunc: deleteTestCronJobs,
expectedCount: 2,
},
{
name: "Jobs",
createFunc: createTestJobs,
getItemsFunc: callbacks.GetJobItems,
deleteFunc: deleteTestJobs,
expectedCount: 2,
},
{
name: "DaemonSets",
createFunc: createTestDaemonSets,
getItemsFunc: callbacks.GetDaemonSetItems,
deleteFunc: deleteTestDaemonSets,
expectedCount: 2,
},
{
name: "StatefulSets",
createFunc: createTestStatefulSets,
getItemsFunc: callbacks.GetStatefulSetItems,
deleteFunc: deleteTestStatefulSets,
expectedCount: 2,
},
}
@@ -262,10 +335,11 @@ func TestUpdateResources(t *testing.T) {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
updateFunc func(kube.Clients, string, runtime.Object) error
deleteFunc func(kube.Clients, string, string) error
}{
{"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
{"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment, deleteTestDeployment},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet, deleteTestDaemonSet},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet, deleteTestStatefulSet},
}
for _, tt := range tests {
@@ -275,6 +349,65 @@ func TestUpdateResources(t *testing.T) {
err = tt.updateFunc(clients, fixtures.namespace, resource)
assert.NoError(t, err)
accessor, err := meta.Accessor(resource)
assert.NoError(t, err)
err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
assert.NoError(t, err)
})
}
}
func TestPatchResources(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
patchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
deleteFunc func(kube.Clients, string, string) error
assertFunc func(err error)
}{
{"Deployment", createTestDeploymentWithAnnotations, callbacks.PatchDeployment, deleteTestDeployment, func(err error) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).ObjectMeta.Annotations["test"])
}},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).ObjectMeta.Annotations["test"])
}},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) {
assert.NoError(t, err)
patchedResource, err := callbacks.GetStatefulSetItem(clients, "test-statefulset", fixtures.namespace)
assert.NoError(t, err)
assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).ObjectMeta.Annotations["test"])
}},
{"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) {
assert.EqualError(t, err, "not supported patching: CronJob")
}},
{"Job", createTestJobWithAnnotations, callbacks.PatchJob, deleteTestJob, func(err error) {
assert.EqualError(t, err, "not supported patching: Job")
}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resource, err := tt.createFunc(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = tt.patchFunc(clients, fixtures.namespace, resource, patchtypes.StrategicMergePatchType, []byte(`{"metadata":{"annotations":{"test":"test"}}}`))
tt.assertFunc(err)
accessor, err := meta.Accessor(resource)
assert.NoError(t, err)
err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
assert.NoError(t, err)
})
}
}
@@ -282,10 +415,26 @@ func TestUpdateResources(t *testing.T) {
func TestCreateJobFromCronjob(t *testing.T) {
fixtures := newTestFixtures()
runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
cronJob := runtimeObj.(*batchv1.CronJob)
err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob)
assert.NoError(t, err)
jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{})
assert.NoError(t, err)
ownerFound := false
for _, job := range jobList.Items {
if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) {
ownerFound = true
break
}
}
assert.Truef(t, ownerFound, "Missing CronJob owner reference")
err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name)
assert.NoError(t, err)
}
@@ -297,6 +446,9 @@ func TestReCreateJobFromJob(t *testing.T) {
err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
assert.NoError(t, err)
err = deleteTestJob(clients, fixtures.namespace, "test-job")
assert.NoError(t, err)
}
func TestGetVolumes(t *testing.T) {
@@ -321,6 +473,24 @@ func TestGetVolumes(t *testing.T) {
}
}
func TestGetPatchTemplateAnnotation(t *testing.T) {
templates := callbacks.GetPatchTemplates()
assert.NotEmpty(t, templates.AnnotationTemplate)
assert.Equal(t, 2, strings.Count(templates.AnnotationTemplate, "%s"))
}
func TestGetPatchTemplateEnvVar(t *testing.T) {
templates := callbacks.GetPatchTemplates()
assert.NotEmpty(t, templates.EnvVarTemplate)
assert.Equal(t, 3, strings.Count(templates.EnvVarTemplate, "%s"))
}
func TestGetPatchDeleteTemplateEnvVar(t *testing.T) {
templates := callbacks.GetPatchTemplates()
assert.NotEmpty(t, templates.DeleteEnvVarTemplate)
assert.Equal(t, 2, strings.Count(templates.DeleteEnvVarTemplate, "%d"))
}
// Helper functions
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
@@ -330,7 +500,7 @@ func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
func watchRollout(name, namespace string) chan interface{} {
timeOut := int64(1)
modifiedChan := make(chan interface{})
watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), metav1.ListOptions{TimeoutSeconds: &timeOut})
go watchModified(watcher, name, modifiedChan)
return modifiedChan
}
@@ -358,6 +528,16 @@ func createTestDeployments(clients kube.Clients, namespace string) error {
return nil
}
func deleteTestDeployments(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
err := testutil.DeleteDeployment(clients.KubernetesClient, namespace, fmt.Sprintf("test-deployment-%d", i))
if err != nil {
return err
}
}
return nil
}
func createTestCronJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
@@ -368,6 +548,16 @@ func createTestCronJobs(clients kube.Clients, namespace string) error {
return nil
}
func deleteTestCronJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
err := testutil.DeleteCronJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-cron-%d", i))
if err != nil {
return err
}
}
return nil
}
func createTestJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
@@ -378,6 +568,16 @@ func createTestJobs(clients kube.Clients, namespace string) error {
return nil
}
func deleteTestJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
err := testutil.DeleteJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-job-%d", i))
if err != nil {
return err
}
}
return nil
}
func createTestDaemonSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
@@ -388,6 +588,16 @@ func createTestDaemonSets(clients kube.Clients, namespace string) error {
return nil
}
func deleteTestDaemonSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
err := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-daemonset-%d", i))
if err != nil {
return err
}
}
return nil
}
func createTestStatefulSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
@@ -398,6 +608,16 @@ func createTestStatefulSets(clients kube.Clients, namespace string) error {
return nil
}
func deleteTestStatefulSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
err := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-statefulset-%d", i))
if err != nil {
return err
}
}
return nil
}
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
@@ -479,6 +699,10 @@ func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, versio
return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}
func deleteTestDeployment(clients kube.Clients, namespace, name string) error {
return clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
@@ -490,6 +714,10 @@ func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version
return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}
func deleteTestDaemonSet(clients kube.Clients, namespace, name string) error {
return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@@ -501,6 +729,10 @@ func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, versi
return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}
func deleteTestStatefulSet(clients kube.Clients, namespace, name string) error {
return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
cronJob := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
@@ -512,6 +744,10 @@ func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version s
return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}
func deleteTestCronJob(clients kube.Clients, namespace, name string) error {
return clients.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
@@ -522,3 +758,16 @@ func createTestJobWithAnnotations(clients kube.Clients, namespace, version strin
}
return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
func deleteTestJob(clients kube.Clients, namespace, name string) error {
return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}
func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool {
for _, ownerRef := range ownerRefs {
if ownerRef.Controller != nil && *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name {
return true
}
}
return false
}


@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"strings"
@@ -14,12 +15,12 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"github.com/stakater/Reloader/internal/pkg/controller"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
)
@@ -33,27 +34,7 @@ func NewReloaderCommand() *cobra.Command {
}
// options
cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
util.ConfigureReloaderFlags(cmd)
return cmd
}
@@ -122,15 +103,18 @@ func getHAEnvs() (string, string) {
}
func startReloader(cmd *cobra.Command, args []string) {
common.GetCommandLineOptions()
err := configureLogging(options.LogFormat, options.LogLevel)
if err != nil {
logrus.Warn(err)
}
logrus.Info("Starting Reloader")
isGlobal := false
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
if len(currentNamespace) == 0 {
currentNamespace = v1.NamespaceAll
isGlobal = true
logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.")
}
@@ -140,22 +124,22 @@ func startReloader(cmd *cobra.Command, args []string) {
logrus.Fatal(err)
}
ignoredResourcesList, err := util.GetIgnoredResourcesList()
if err != nil {
logrus.Fatal(err)
}
ignoredNamespacesList := options.NamespacesToIgnore
namespaceLabelSelector := ""
if isGlobal {
namespaceLabelSelector, err = util.GetNamespaceLabelSelector()
if err != nil {
logrus.Fatal(err)
}
}
resourceLabelSelector, err := util.GetResourceLabelSelector()
if err != nil {
logrus.Fatal(err)
}
@@ -207,107 +191,19 @@ func startReloader(cmd *cobra.Command, args []string) {
go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
}
common.PublishMetaInfoConfigmap(clientset)
if options.EnablePProf {
go startPProfServer()
}
leadership.SetupLivenessEndpoint()
logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}
func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}
func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
if err != nil {
logrus.Fatal(err)
}
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
namespaceLabelSelector := strings.Join(slice[:], ",")
_, err = labels.Parse(namespaceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return namespaceLabelSelector, nil
}
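For reference, the legacy conversion above (which this change relocates behind util.GetNamespaceLabelSelector) maps the older key:value syntax onto standard label-selector syntax. The inputs below are illustrative only:
// "team:platform" -> "team=platform"
// "team:*"        -> "team"   (wildcard keeps only the key, i.e. "key exists")
// "env=*"         -> "env"
// The parts are then joined with "," and validated, e.g.:
selector, err := labels.Parse("team=platform,env")
if err != nil {
    logrus.Fatal(err)
}
_ = selector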
func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
if err != nil {
logrus.Fatal(err)
}
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
resourceLabelSelector := strings.Join(slice[:], ",")
_, err = labels.Parse(resourceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return resourceLabelSelector, nil
}
func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
slice, err := cmd.Flags().GetStringSlice(flag)
if err != nil {
return nil, err
}
return slice, nil
}
func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {
ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
if err != nil {
return nil, err
}
for _, v := range ignoredResourcesList {
if v != "configMaps" && v != "secrets" {
return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
}
}
if len(ignoredResourcesList) > 1 {
return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
}
return ignoredResourcesList, nil
}
func startPProfServer() {
logrus.Infof("Starting pprof server on %s", options.PProfAddr)
if err := http.ListenAndServe(options.PProfAddr, nil); err != nil {
logrus.Errorf("Failed to start pprof server: %v", err)
}
}
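Note that the blank import _ "net/http/pprof" shown earlier registers the /debug/pprof/* handlers on http.DefaultServeMux, which is what both the main listener and this dedicated pprof listener serve when given a nil handler. A minimal standalone sketch of the same mechanism, with an illustrative address:
package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers /debug/pprof/* on the default mux
)

func main() {
    // Heap, CPU and goroutine profiles are then reachable under /debug/pprof/ on this listener.
    log.Fatal(http.ListenAndServe("localhost:6060", nil))
}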


@@ -68,64 +68,6 @@ func TestMain(m *testing.M) {
os.Exit(retCode)
}
// Perform rolling upgrade on deploymentConfig and create pod annotation upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeploymentConfig(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
// Don't run test on non-openshift environment
if !kube.IsOpenshift {
return
}
// Creating configmap
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
if err != nil {
t.Errorf("Error while creating the configmap %v", err)
}
// Creating deployment
_, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
if err != nil {
t.Errorf("Error in deploymentConfig creation: %v", err)
}
// Updating configmap for first time
updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
if updateErr != nil {
t.Errorf("Configmap was not updated")
}
// Verifying deployment update
logrus.Infof("Verifying pod annotation has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentConfigFuncs)
if !updated {
t.Errorf("DeploymentConfig was not updated")
}
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the deploymentConfig %v", err)
}
// Deleting configmap
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create pod annotation upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
@@ -1078,64 +1020,6 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deploymentConfig and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
// Don't run test on non-openshift environment
if !kube.IsOpenshift {
return
}
// Creating configmap
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
if err != nil {
t.Errorf("Error while creating the configmap %v", err)
}
// Creating deployment
_, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
if err != nil {
t.Errorf("Error in deploymentConfig creation: %v", err)
}
// Updating configmap for first time
updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
if updateErr != nil {
t.Errorf("Configmap was not updated")
}
// Verifying deployment update
logrus.Infof("Verifying env var has been created")
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
config := util.Config{
Namespace: namespace,
ResourceName: configmapName,
SHAValue: shaData,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
}
deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentConfigFuncs)
if !updated {
t.Errorf("DeploymentConfig was not updated")
}
time.Sleep(sleepDuration)
// Deleting deployment
err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the deploymentConfig %v", err)
}
// Deleting configmap
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
if err != nil {
logrus.Errorf("Error while deleting the configmap %v", err)
}
time.Sleep(sleepDuration)
}
// Perform rolling upgrade on deployment and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy


@@ -1,6 +1,9 @@
package handler
import (
"fmt"
"slices"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
@@ -8,8 +11,10 @@ import (
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
patchtypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
)
@@ -50,7 +55,7 @@ func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
return config, oldSHAData
}
func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return removePodAnnotations(upgradeFuncs, item, config, autoReload)
}
@@ -58,35 +63,38 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
config.SHAValue = testutil.GetSHAfromEmptyData()
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return InvokeStrategyResult{constants.NoContainerFound, nil}
}
//remove if env var exists
if len(container.Env) > 0 {
index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool {
return envVariable.Name == envVar
})
if index != -1 {
var patch []byte
if upgradeFuncs.SupportsPatch {
containers := upgradeFuncs.ContainersFunc(item)
containerIndex := slices.IndexFunc(containers, func(c v1.Container) bool {
return c.Name == container.Name
})
patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate, containerIndex, index)
}
container.Env = append(container.Env[:index], container.Env[index+1:]...)
return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.JSONPatchType, Bytes: patch}}
}
}
return InvokeStrategyResult{constants.NotUpdated, nil}
}
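For concreteness, with the DeleteEnvVarTemplate from GetPatchTemplates, a container index of 1 and an env index of 0, the JSON patch assembled above would look like the following (indexes illustrative; JSON-patch paths are zero-based):
patch := fmt.Appendf(nil, `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, 1, 0)
// => [{"op":"remove","path":"/spec/template/spec/containers/1/env/0"}]
_ = patch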


@@ -0,0 +1,242 @@
package handler
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/pkg/kube"
app "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
patchtypes "k8s.io/apimachinery/pkg/types"
)
// Keeps track of currently active timers
var activeTimers = make(map[string]*time.Timer)
// Returns unique key for the activeTimers map
func getTimerKey(namespace, deploymentName string) string {
return fmt.Sprintf("%s/%s", namespace, deploymentName)
}
// Checks if a deployment is currently paused
func IsPaused(deployment *app.Deployment) bool {
return deployment.Spec.Paused
}
// IsPausedByReloader reports whether the deployment was paused by Reloader
func IsPausedByReloader(deployment *app.Deployment) bool {
if IsPaused(deployment) {
pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
return pausedAtAnnotationValue != ""
}
return false
}
// GetPauseStartTime returns the time at which the deployment was paused by Reloader, or nil if it was not paused by Reloader
func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) {
if !IsPausedByReloader(deployment) {
return nil, nil
}
pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
parsedTime, err := time.Parse(time.RFC3339, pausedAtStr)
if err != nil {
return nil, err
}
return &parsedTime, nil
}
// ParsePauseDuration parses the pause interval value and returns a time.Duration
func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) {
pauseDuration, err := time.ParseDuration(pauseIntervalValue)
if err != nil {
logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err)
return 0, err
}
return pauseDuration, nil
}
// PauseDeployment pauses a deployment for the given duration and registers a timer
// that resumes it once the duration elapses
func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) {
deploymentName := deployment.Name
pauseDuration, err := ParsePauseDuration(pauseIntervalValue)
if err != nil {
return nil, err
}
if !IsPaused(deployment) {
logrus.Infof("Pausing Deployment '%s' in namespace '%s' for %s", deploymentName, namespace, pauseDuration)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
pausePatch, err := CreatePausePatch()
if err != nil {
logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err)
return deployment, err
}
err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch)
if err != nil {
logrus.Errorf("Failed to patch deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
return deployment, err
}
updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
CreateResumeTimer(deployment, clients, namespace, pauseDuration)
return updatedDeployment, err
}
if !IsPausedByReloader(deployment) {
logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace)
return deployment, nil
}
// Deployment has already been paused by reloader, check for timer
logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace)
timerKey := getTimerKey(namespace, deploymentName)
_, timerExists := activeTimers[timerKey]
if !timerExists {
logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one",
deploymentName, namespace)
HandleMissingTimer(deployment, pauseDuration, clients, namespace)
}
return deployment, nil
}
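A rough usage sketch, assuming the caller already holds kube.Clients and referencing the pause-period annotation through the options constant rather than a literal key (its string value is not shown in this diff; the namespace and deployment name are illustrative):
deployment, err := clients.KubernetesClient.AppsV1().Deployments("default").Get(context.TODO(), "my-app", metav1.GetOptions{})
if err != nil {
    logrus.Errorf("failed to get deployment: %v", err)
    return
}
if interval := deployment.Annotations[options.PauseDeploymentAnnotation]; interval != "" {
    if _, err := PauseDeployment(deployment, clients, "default", interval); err != nil {
        logrus.Errorf("failed to pause deployment: %v", err)
    }
}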
// HandleMissingTimer handles a deployment that was paused by Reloader but has no active resume timer.
// This can occur after a new leader election or a Reloader restart
func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) {
deploymentName := deployment.Name
pauseStartTime, err := GetPauseStartTime(deployment)
if err != nil {
logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. Resuming deployment immediately",
deploymentName, namespace, err)
ResumeDeployment(deployment, namespace, clients)
return
}
if pauseStartTime == nil {
return
}
elapsedPauseTime := time.Since(*pauseStartTime)
remainingPauseTime := pauseDuration - elapsedPauseTime
if remainingPauseTime <= 0 {
logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. Resuming immediately",
deploymentName, namespace)
ResumeDeployment(deployment, namespace, clients)
return
}
logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s",
deploymentName, namespace, remainingPauseTime)
CreateResumeTimer(deployment, clients, namespace, remainingPauseTime)
}
// CreateResumeTimer creates a timer to resume the deployment after the specified duration
func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) {
deploymentName := deployment.Name
timerKey := getTimerKey(namespace, deployment.Name)
// Check if there's an existing timer for this deployment
if _, exists := activeTimers[timerKey]; exists {
logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', Skipping creation",
deploymentName, namespace)
return
}
// Create and store the new timer
timer := time.AfterFunc(pauseDuration, func() {
ResumeDeployment(deployment, namespace, clients)
})
// Add the new timer to the map
activeTimers[timerKey] = timer
logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s",
deploymentName, namespace, pauseDuration)
}
// ResumeDeployment resumes a deployment that has been paused by reloader
func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) {
deploymentName := deployment.Name
currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
return
}
if !IsPausedByReloader(currentDeployment) {
logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. Skipping resume", deploymentName, namespace)
return
}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
resumePatch, err := CreateResumePatch()
if err != nil {
logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err)
return
}
// Remove the timer
timerKey := getTimerKey(namespace, deploymentName)
if timer, exists := activeTimers[timerKey]; exists {
timer.Stop()
delete(activeTimers, timerKey)
logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace)
}
err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch)
if err != nil {
logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
return
}
logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace)
}
func CreatePausePatch() ([]byte, error) {
patchData := map[string]interface{}{
"spec": map[string]interface{}{
"paused": true,
},
"metadata": map[string]interface{}{
"annotations": map[string]string{
options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
},
},
}
return json.Marshal(patchData)
}
func CreateResumePatch() ([]byte, error) {
patchData := map[string]interface{}{
"spec": map[string]interface{}{
"paused": false,
},
"metadata": map[string]interface{}{
"annotations": map[string]interface{}{
options.PauseDeploymentTimeAnnotation: nil,
},
},
}
return json.Marshal(patchData)
}
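Applied as strategic merge patches, the two helpers above produce bodies along the following lines (timestamp and annotation key shown as placeholders; setting the annotation to null in the resume patch removes it from the object):
pausePatch, _ := CreatePausePatch()
resumePatch, _ := CreateResumePatch()
// pausePatch  ~ {"metadata":{"annotations":{"<pause-time-annotation>":"2025-07-29T15:49:30Z"}},"spec":{"paused":true}}
// resumePatch ~ {"metadata":{"annotations":{"<pause-time-annotation>":null}},"spec":{"paused":false}}
fmt.Printf("%s\n%s\n", pausePatch, resumePatch)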


@@ -0,0 +1,392 @@
package handler
import (
"context"
"fmt"
"testing"
"time"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/pkg/kube"
"github.com/stretchr/testify/assert"
app "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
testclient "k8s.io/client-go/kubernetes/fake"
)
func TestIsPaused(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
paused bool
}{
{
name: "paused deployment",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: true,
},
},
paused: true,
},
{
name: "unpaused deployment",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: false,
},
},
paused: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
result := IsPaused(test.deployment)
assert.Equal(t, test.paused, result)
})
}
}
func TestIsPausedByReloader(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
pausedByReloader bool
}{
{
name: "paused by reloader",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: true,
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
},
},
},
pausedByReloader: true,
},
{
name: "not paused by reloader",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: true,
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
},
pausedByReloader: false,
},
{
name: "not paused",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: false,
},
},
pausedByReloader: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pausedByReloader := IsPausedByReloader(test.deployment)
assert.Equal(t, test.pausedByReloader, pausedByReloader)
})
}
}
func TestGetPauseStartTime(t *testing.T) {
now := time.Now()
nowStr := now.Format(time.RFC3339)
tests := []struct {
name string
deployment *appsv1.Deployment
pausedByReloader bool
expectedStartTime time.Time
}{
{
name: "valid pause time",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: true,
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
options.PauseDeploymentTimeAnnotation: nowStr,
},
},
},
pausedByReloader: true,
expectedStartTime: now,
},
{
name: "not paused by reloader",
deployment: &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Paused: false,
},
},
pausedByReloader: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actualStartTime, err := GetPauseStartTime(test.deployment)
assert.NoError(t, err)
if !test.pausedByReloader {
assert.Nil(t, actualStartTime)
} else {
assert.NotNil(t, actualStartTime)
assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second)
}
})
}
}
func TestParsePauseDuration(t *testing.T) {
tests := []struct {
name string
pauseIntervalValue string
expectedDuration time.Duration
invalidDuration bool
}{
{
name: "valid duration",
pauseIntervalValue: "10s",
expectedDuration: 10 * time.Second,
invalidDuration: false,
},
{
name: "valid minute duration",
pauseIntervalValue: "2m",
expectedDuration: 2 * time.Minute,
invalidDuration: false,
},
{
name: "invalid duration",
pauseIntervalValue: "invalid",
expectedDuration: 0,
invalidDuration: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actualDuration, err := ParsePauseDuration(test.pauseIntervalValue)
if test.invalidDuration {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, test.expectedDuration, actualDuration)
}
})
}
}
func TestHandleMissingTimerSimple(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
shouldBePaused bool // Expected paused state after HandleMissingTimer runs
}{
{
name: "deployment paused by reloader, pause period has expired and no timer",
deployment: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment-1",
Annotations: map[string]string{
options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339),
options.PauseDeploymentAnnotation: "5m",
},
},
Spec: appsv1.DeploymentSpec{
Paused: true,
},
},
shouldBePaused: false,
},
{
name: "deployment paused by reloader, pause period expires in the future and no timer",
deployment: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment-2",
Annotations: map[string]string{
options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339),
options.PauseDeploymentAnnotation: "5m",
},
},
Spec: appsv1.DeploymentSpec{
Paused: true,
},
},
shouldBePaused: true,
},
}
for _, test := range tests {
// Clean up any timers at the end of the test
defer func() {
for key, timer := range activeTimers {
timer.Stop()
delete(activeTimers, key)
}
}()
t.Run(test.name, func(t *testing.T) {
fakeClient := testclient.NewSimpleClientset()
clients := kube.Clients{
KubernetesClient: fakeClient,
}
_, err := fakeClient.AppsV1().Deployments("default").Create(
context.TODO(),
test.deployment,
metav1.CreateOptions{})
assert.NoError(t, err, "Expected no error when creating deployment")
pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation])
HandleMissingTimer(test.deployment, pauseDuration, clients, "default")
updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{})
assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused,
"Deployment should have correct paused state after timer expiration")
if test.shouldBePaused {
pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
assert.NotEmpty(t, pausedAtAnnotationValue,
"Pause annotation should be present and contain a value when deployment is paused")
}
})
}
}
func TestPauseDeployment(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
expectedError bool
expectedPaused bool
expectedAnnotation bool // Should have pause time annotation
pauseInterval string
}{
{
name: "deployment without pause annotation",
deployment: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Annotations: map[string]string{},
},
Spec: appsv1.DeploymentSpec{
Paused: false,
},
},
expectedError: true,
expectedPaused: false,
expectedAnnotation: false,
pauseInterval: "",
},
{
name: "deployment already paused but not by reloader",
deployment: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Annotations: map[string]string{
options.PauseDeploymentAnnotation: "5m",
},
},
Spec: appsv1.DeploymentSpec{
Paused: true,
},
},
expectedError: false,
expectedPaused: true,
expectedAnnotation: false,
pauseInterval: "5m",
},
{
name: "deployment unpaused that needs to be paused by reloader",
deployment: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment-3",
Annotations: map[string]string{
options.PauseDeploymentAnnotation: "5m",
},
},
Spec: appsv1.DeploymentSpec{
Paused: false,
},
},
expectedError: false,
expectedPaused: true,
expectedAnnotation: true,
pauseInterval: "5m",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := testclient.NewSimpleClientset()
clients := kube.Clients{
KubernetesClient: fakeClient,
}
_, err := fakeClient.AppsV1().Deployments("default").Create(
context.TODO(),
test.deployment,
metav1.CreateOptions{})
assert.NoError(t, err, "Expected no error when creating deployment")
updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval)
if test.expectedError {
assert.Error(t, err, "Expected an error pausing the deployment")
return
} else {
assert.NoError(t, err, "Expected no error pausing the deployment")
}
assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused,
"Deployment should have correct paused state after pause")
if test.expectedAnnotation {
pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
assert.NotEmpty(t, pausedAtAnnotationValue,
"Pause annotation should be present and contain a value when deployment is paused")
} else {
pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
assert.Empty(t, pausedAtAnnotationValue,
"Pause annotation should not be present when deployment has not been paused by reloader")
}
})
}
}
// FindDeploymentByName is a test helper that returns the Deployment with the given name from a slice of runtime objects
func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*app.Deployment, error) {
for _, deployment := range deployments {
accessor, err := meta.Accessor(deployment)
if err != nil {
return nil, fmt.Errorf("error getting accessor for item: %v", err)
}
if accessor.GetName() == deploymentName {
deploymentObj, ok := deployment.(*app.Deployment)
if !ok {
return nil, fmt.Errorf("failed to cast to Deployment")
}
return deploymentObj, nil
}
}
return nil, fmt.Errorf("deployment '%s' not found", deploymentName)
}


@@ -7,9 +7,6 @@ import (
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
"github.com/parnurzeal/gorequest"
"github.com/prometheus/client_golang/prometheus"
@@ -20,108 +17,124 @@ import (
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
"github.com/stakater/Reloader/pkg/kube"
app "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
patchtypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
)
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetDeploymentItem,
ItemsFunc: callbacks.GetDeploymentItems,
AnnotationsFunc: callbacks.GetDeploymentAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
ContainersFunc: callbacks.GetDeploymentContainers,
InitContainersFunc: callbacks.GetDeploymentInitContainers,
UpdateFunc: callbacks.UpdateDeployment,
PatchFunc: callbacks.PatchDeployment,
PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetDeploymentVolumes,
ResourceType: "Deployment",
SupportsPatch: true,
}
}
// GetCronJobCreateJobFuncs returns all callback funcs for a cronjob
func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetCronJobItem,
ItemsFunc: callbacks.GetCronJobItems,
AnnotationsFunc: callbacks.GetCronJobAnnotations,
PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations,
ContainersFunc: callbacks.GetCronJobContainers,
InitContainersFunc: callbacks.GetCronJobInitContainers,
UpdateFunc: callbacks.CreateJobFromCronjob,
PatchFunc: callbacks.PatchCronJob,
PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetCronJobVolumes,
ResourceType: "CronJob",
SupportsPatch: false,
}
}
// GetJobCreateJobFuncs returns all callback funcs for a job
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetJobItem,
ItemsFunc: callbacks.GetJobItems,
AnnotationsFunc: callbacks.GetJobAnnotations,
PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
ContainersFunc: callbacks.GetJobContainers,
InitContainersFunc: callbacks.GetJobInitContainers,
UpdateFunc: callbacks.ReCreateJobFromjob,
PatchFunc: callbacks.PatchJob,
PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetJobVolumes,
ResourceType: "Job",
SupportsPatch: false,
}
}
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetDaemonSetItem,
ItemsFunc: callbacks.GetDaemonSetItems,
AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
ContainersFunc: callbacks.GetDaemonSetContainers,
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
UpdateFunc: callbacks.UpdateDaemonSet,
PatchFunc: callbacks.PatchDaemonSet,
PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetDaemonSetVolumes,
ResourceType: "DaemonSet",
SupportsPatch: true,
}
}
// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetStatefulSetItem,
ItemsFunc: callbacks.GetStatefulSetItems,
AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
ContainersFunc: callbacks.GetStatefulSetContainers,
InitContainersFunc: callbacks.GetStatefulSetInitContainers,
UpdateFunc: callbacks.UpdateStatefulSet,
PatchFunc: callbacks.PatchStatefulSet,
PatchTemplatesFunc: callbacks.GetPatchTemplates,
VolumesFunc: callbacks.GetStatefulSetVolumes,
ResourceType: "StatefulSet",
}
}
// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetDeploymentConfigItems,
AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
ContainersFunc: callbacks.GetDeploymentConfigContainers,
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
UpdateFunc: callbacks.UpdateDeploymentConfig,
VolumesFunc: callbacks.GetDeploymentConfigVolumes,
ResourceType: "DeploymentConfig",
SupportsPatch: true,
}
}
// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemFunc: callbacks.GetRolloutItem,
ItemsFunc: callbacks.GetRolloutItems,
AnnotationsFunc: callbacks.GetRolloutAnnotations,
PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
ContainersFunc: callbacks.GetRolloutContainers,
InitContainersFunc: callbacks.GetRolloutInitContainers,
UpdateFunc: callbacks.UpdateRollout,
PatchFunc: callbacks.PatchRollout,
PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
VolumesFunc: callbacks.GetRolloutVolumes,
ResourceType: "Rollout",
SupportsPatch: false,
}
}
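// Each RollingUpgradeFuncs bundle above is consumed generically: PerformAction lists workloads via
// ItemsFunc, upgradeResource reads annotations through AnnotationsFunc/PodAnnotationsFunc, and the
// change is applied either through PatchFunc (when SupportsPatch is true and the strategy produced a
// patch) or through UpdateFunc as the fallback. A rough sketch of that dispatch, with error handling
// and metrics elided:
//
//	if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
//	    err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
//	} else {
//	    err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
//	}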
@@ -180,13 +193,6 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
return err
}
if kube.IsOpenshift {
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
}
if options.IsArgoRollouts == "true" {
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
@@ -198,7 +204,6 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
}
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
if err != nil {
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
@@ -210,123 +215,121 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
// find correct annotation and update the resource
annotations := upgradeFuncs.AnnotationsFunc(i)
annotationValue, found := annotations[config.Annotation]
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
annotations = upgradeFuncs.PodAnnotationsFunc(i)
annotationValue = annotations[config.Annotation]
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
}
isResourceExcluded := false
switch config.Type {
case constants.ConfigmapEnvVarPostfix:
if foundExcludeConfigmap {
isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
}
case constants.SecretEnvVarPostfix:
if foundExcludeSecret {
isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
}
}
if isResourceExcluded {
continue
}
result := constants.NotUpdated
reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
result = strategy(upgradeFuncs, i, config, true)
}
if result != constants.Updated && annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.TrimSpace(value)
re := regexp.MustCompile("^" + value + "$")
if re.Match([]byte(config.ResourceName)) {
result = strategy(upgradeFuncs, i, config, false)
if result == constants.Updated {
break
}
}
}
}
if result != constants.Updated && searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
result = strategy(upgradeFuncs, i, config, true)
}
}
if result == constants.Updated {
accessor, err := meta.Accessor(i)
if err != nil {
return err
}
resourceName := accessor.GetName()
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
if err != nil {
message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
if recorder != nil {
recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
}
return err
} else {
message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
if recorder != nil {
recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
}
if ok && alert_on_reload == "true" {
msg := fmt.Sprintf(
"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
alert.SendWebhookAlert(msg)
}
}
for _, item := range items {
err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error {
return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource)
})
if err != nil {
return err
}
}
return nil
}
func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
if excludedResources == "" {
return false
}
excludedResourcesList := strings.Split(excludedResources, ",")
for _, excludedResource := range excludedResourcesList {
if strings.TrimSpace(excludedResource) == resourceName {
return true
}
}
return false
}
func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
var lastError error
fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn(fetchResource)
fetchResource = true
switch {
case err == nil:
return true, nil
case apierrors.IsConflict(err):
lastError = err
return false, nil
default:
return false, err
}
})
if wait.Interrupted(err) {
err = lastError
}
return err
}
func upgradeResource(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
accessor, err := meta.Accessor(resource)
if err != nil {
return err
}
resourceName := accessor.GetName()
if fetchResource {
resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace)
if err != nil {
return err
}
}
annotations := upgradeFuncs.AnnotationsFunc(resource)
podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource)
result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions())
if !result.ShouldReload {
logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
return nil
}
strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload)
if strategyResult.Result != constants.Updated {
return nil
}
// pause the deployment before applying the update when the pause annotation is set
pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation]
if foundPauseInterval {
deployment, ok := resource.(*app.Deployment)
if !ok {
logrus.Warnf("Annotation '%s' only applicable for deployments", options.PauseDeploymentAnnotation)
} else {
_, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval)
if err != nil {
logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err)
return err
}
}
}
if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
} else {
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
}
if err != nil {
message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
if recorder != nil {
recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message)
}
return err
} else {
message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
if recorder != nil {
recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message)
}
if ok && alert_on_reload == "true" {
msg := fmt.Sprintf(
"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
alert.SendWebhookAlert(msg)
}
}
return nil
}
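// The retry flow above: the first attempt reuses the object already returned by ItemsFunc
// (fetchResource is false), and only when the API server answers with a conflict does
// retryOnConflict invoke the callback again with fetchResource set to true, so upgradeResource
// re-reads the latest object via ItemFunc before patching or updating once more. This is why the
// patch-and-retry tests further down expect ItemsFunc to be called once, ItemFunc once, and
// PatchFunc twice.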
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
@@ -439,42 +442,51 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
return container
}
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result
type Patch struct {
Type patchtypes.PatchType
Bytes []byte
}
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
type InvokeStrategyResult struct {
Result constants.Result
Patch *Patch
}
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}
return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return constants.NoContainerFound
return InvokeStrategyResult{constants.NoContainerFound, nil}
}
// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
// Note: the data on this struct is purely informational and is not used for future updates
reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
annotations, err := createReloadedAnnotations(&reloadSource)
annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
if err != nil {
logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
return constants.NotUpdated
return InvokeStrategyResult{constants.NotUpdated, nil}
}
// Copy all the annotations to the item's pod template annotations
pa := upgradeFuncs.PodAnnotationsFunc(item)
if pa == nil {
return constants.NotUpdated
return InvokeStrategyResult{constants.NotUpdated, nil}
}
for k, v := range annotations {
pa[k] = v
}
return constants.Updated
return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}
func getReloaderAnnotationKey() string {
@@ -484,9 +496,9 @@ func getReloaderAnnotationKey() string {
)
}
func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
func createReloadedAnnotations(target *util.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
if target == nil {
return nil, errors.New("target is required")
return nil, nil, errors.New("target is required")
}
// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
@@ -498,53 +510,76 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
lastReloadedResource, err := json.Marshal(target)
if err != nil {
return nil, err
return nil, nil, err
}
annotations[lastReloadedResourceName] = string(lastReloadedResource)
return annotations, nil
var patch []byte
if upgradeFuncs.SupportsPatch {
escapedValue, err := jsonEscape(annotations[lastReloadedResourceName])
if err != nil {
return nil, nil, err
}
patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().AnnotationTemplate, lastReloadedResourceName, escapedValue)
}
return annotations, patch, nil
}
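// For reference, the strategic-merge patch built from AnnotationTemplate targets the pod template
// annotations; the tests below assert it starts with a fragment of this shape (the value here is
// illustrative and abbreviated):
//
//	{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":"{\"type\":\"...\",\"hash\":\"...\"}"}}}}}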
func getEnvVarName(resourceName string, typeName string) string {
return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
var result constants.Result
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
envVar := getEnvVarName(config.ResourceName, config.Type)
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
if container == nil {
return constants.NoContainerFound
return InvokeStrategyResult{constants.NoContainerFound, nil}
}
// update the env var in place if it already exists
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
updateResult := updateEnvVar(container, envVar, config.SHAValue)
// if no existing env var exists lets create one
if result == constants.NoEnvVarFound {
if updateResult == constants.NoEnvVarFound {
e := v1.EnvVar{
Name: envVar,
Value: config.SHAValue,
}
container.Env = append(container.Env, e)
result = constants.Updated
updateResult = constants.Updated
}
return result
var patch []byte
if upgradeFuncs.SupportsPatch {
patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().EnvVarTemplate, container.Name, envVar, config.SHAValue)
}
return InvokeStrategyResult{updateResult, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envVar {
if envs[j].Value != shaData {
envs[j].Value = shaData
return constants.Updated
}
return constants.NotUpdated
func updateEnvVar(container *v1.Container, envVar string, shaData string) constants.Result {
envs := container.Env
for j := range envs {
if envs[j].Name == envVar {
if envs[j].Value != shaData {
envs[j].Value = shaData
return constants.Updated
}
return constants.NotUpdated
}
}
return constants.NoEnvVarFound
}
func jsonEscape(toEscape string) (string, error) {
bytes, err := json.Marshal(toEscape)
if err != nil {
return "", err
}
escaped := string(bytes)
return escaped[1 : len(escaped)-1], nil
}
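// Example: jsonEscape(`{"k":"v"}`) yields `{\"k\":\"v\"}`; json.Marshal produces the quoted JSON
// string and the surrounding quotes are then stripped, leaving a value that can be spliced into the
// annotation patch template above.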


@@ -17,9 +17,12 @@ import (
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
patchtypes "k8s.io/apimachinery/pkg/types"
testclient "k8s.io/client-go/kubernetes/fake"
)
@@ -49,6 +52,9 @@ var (
arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
arsSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)
ersNamespace = "test-handler-" + testutil.RandSeq(5)
ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
@@ -72,6 +78,9 @@ var (
ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
@@ -200,6 +209,12 @@ func setupArs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating configmap for testing pausing deployments
_, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret used with secret auto annotation
_, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data)
if err != nil {
@@ -212,6 +227,35 @@ func setupArs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating configmap with ignore annotation
_, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Patch with ignore annotation
cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace)
patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
_, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
// Creating secret with ignore annotation
_, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace)
_, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
// Creating Deployment referencing configmap with ignore annotation
_, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
}
// Creating Deployment referencing secret with ignore annotation
_, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
}
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true)
if err != nil {
@@ -421,6 +465,12 @@ func setupArs() {
if err != nil {
logrus.Errorf("Error in Deployment with both annotations: %v", err)
}
// Creating Deployment with pause annotation
_, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
}
func teardownArs() {
@@ -622,6 +672,12 @@ func teardownArs() {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
// Deleting Deployment with pause annotation
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Configmap
err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName)
if err != nil {
@@ -735,6 +791,12 @@ func teardownArs() {
logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err)
}
// Deleting configmap for testing pausing deployments
err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
if err != nil {
logrus.Errorf("Error while deleting the configmap: %v", err)
}
// Deleting namespace
testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient)
@@ -794,6 +856,12 @@ func setupErs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating configmap for testing pausing deployments
_, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating secret
_, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data)
if err != nil {
@@ -851,6 +919,34 @@ func setupErs() {
logrus.Errorf("Error in configmap creation: %v", err)
}
// Creating configmap with ignore annotation
_, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com")
if err != nil {
logrus.Errorf("Error in configmap creation: %v", err)
}
cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace)
patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
_, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
// Creating secret with ignore annotation
_, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data)
if err != nil {
logrus.Errorf("Error in secret creation: %v", err)
}
secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace)
_, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})
// Creating Deployment referencing configmap with ignore annotation
_, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
}
// Creating Deployment referencing secret with ignore annotation
_, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true)
if err != nil {
logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
}
// Creating Deployment with configmap
_, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
if err != nil {
@@ -970,6 +1066,12 @@ func setupErs() {
logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err)
}
// Creating Deployment with pause annotation
_, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
if err != nil {
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
}
// Creating DaemonSet with configmap
_, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
if err != nil {
@@ -1254,6 +1356,12 @@ func teardownErs() {
logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
}
// Deleting Deployment for testing pausing deployments
deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
if deploymentError != nil {
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
}
// Deleting Configmap
err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName)
if err != nil {
@@ -1367,6 +1475,12 @@ func teardownErs() {
logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err)
}
// Deleting ConfigMap for testing pausing deployments
err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
if err != nil {
logrus.Errorf("Error while deleting the configmap: %v", err)
}
// Deleting namespace
testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient)
@@ -1413,6 +1527,22 @@ func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Client
}
}
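// testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs stubs PatchFunc and UpdateFunc so the
// delete strategy is verified to go through the patch path (a strategic-merge patch) and never
// falls back to a full update.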
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
return nil
}
upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
}
}
func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1422,6 +1552,18 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
itemCalled := 0
itemsCalled := 0
deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetDeploymentItem(client, namespace, name)
}
deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetDeploymentItems(client, namespace)
}
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -1441,9 +1583,68 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 {
t.Errorf("Counter by namespace was not increased")
}
assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com")
config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
assert.True(t, deploymentFuncs.SupportsPatch)
assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate)
itemCalled := 0
itemsCalled := 0
deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetDeploymentItem(client, namespace, name)
}
deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetDeploymentItems(client, namespace)
}
patchCalled := 0
deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`)
return nil
}
deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
}
assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1616,7 +1817,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
_ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
_ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
@@ -2102,6 +2303,7 @@ func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *te
t.Errorf("Deployment which had to be excluded was updated")
}
}
func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2143,6 +2345,18 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
collectors := getCollectors()
itemCalled := 0
itemsCalled := 0
daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetDaemonSetItem(client, namespace, name)
}
daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetDaemonSetItems(client, namespace)
}
err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -2163,9 +2377,68 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com")
config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
itemCalled := 0
itemsCalled := 0
daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetDaemonSetItem(client, namespace, name)
}
daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetDaemonSetItems(client, namespace)
}
assert.True(t, daemonSetFuncs.SupportsPatch)
assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate)
patchCalled := 0
daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`)
return nil
}
daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
}
assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
daemonSetFuncs = GetDaemonSetRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2303,6 +2576,18 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
collectors := getCollectors()
itemCalled := 0
itemsCalled := 0
statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetStatefulSetItem(client, namespace, name)
}
statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetStatefulSetItems(client, namespace)
}
err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
@@ -2323,9 +2608,68 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com")
config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
itemCalled := 0
itemsCalled := 0
statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
itemCalled++
return callbacks.GetStatefulSetItem(client, namespace, name)
}
statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
itemsCalled++
return callbacks.GetStatefulSetItems(client, namespace)
}
assert.True(t, statefulSetFuncs.SupportsPatch)
assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate)
patchCalled := 0
statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`)
return nil
}
statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
}
assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")
statefulSetFuncs = GetStatefulSetRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2488,6 +2832,9 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
return fmt.Errorf("error")
}
collectors := getCollectors()
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
@@ -2501,6 +2848,65 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
}
}
func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com")
config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS")
}
// Ensure deployment is NOT updated
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
if updated {
t.Errorf("Deployment was updated but should not have been")
}
// Ensure counters remain zero
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
t.Errorf("Reload counter should not have increased")
}
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 {
t.Errorf("Reload counter by namespace should not have increased")
}
}
func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com")
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS")
}
// Ensure deployment is NOT updated
updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs)
if updated {
t.Errorf("Deployment was updated but should not have been (ERS)")
}
// Ensure counters remain zero
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
t.Errorf("Reload counter should not have increased (ERS)")
}
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 {
t.Errorf("Reload counter by namespace should not have increased (ERS)")
}
}
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
time.Sleep(5 * time.Second)
@@ -2518,6 +2924,24 @@ func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Client
}
}
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate)
upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
assert.Equal(t, patchtypes.JSONPatchType, patchType)
assert.NotEmpty(t, bytes)
return nil
}
upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
}
}
func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2550,6 +2974,48 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com")
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
assert.True(t, deploymentFuncs.SupportsPatch)
assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate)
patchCalled := 0
deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`)
return nil
}
deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
if err != nil {
t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix)
}
assert.Equal(t, 2, patchCalled)
deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2658,7 +3124,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
t.Errorf("Failed to create deployment with search annotation.")
}
defer func() {
_ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
_ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
}()
// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
@@ -3212,6 +3678,49 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com")
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
assert.True(t, daemonSetFuncs.SupportsPatch)
assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate)
patchCalled := 0
daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`)
return nil
}
daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
}
assert.Equal(t, 2, patchCalled)
daemonSetFuncs = GetDaemonSetRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3372,6 +3881,49 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) {
testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com")
config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
assert.True(t, statefulSetFuncs.SupportsPatch)
assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate)
patchCalled := 0
statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
patchCalled++
if patchCalled < 2 {
return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
}
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
assert.NotEmpty(t, bytes)
assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`)
return nil
}
statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
t.Errorf("Update should not be called")
return nil
}
collectors := getCollectors()
err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
time.Sleep(5 * time.Second)
if err != nil {
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
}
assert.Equal(t, 2, patchCalled)
statefulSetFuncs = GetStatefulSetRollingUpgradeFuncs()
testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3536,6 +4088,9 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
return fmt.Errorf("error")
}
deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
return fmt.Errorf("error")
}
collectors := getCollectors()
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
@@ -3548,3 +4103,114 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
t.Errorf("Counter by namespace was not increased")
}
}
func TestPausingDeploymentUsingErs(t *testing.T) {
options.ReloadStrategy = constants.EnvVarsReloadStrategy
testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace)
}
func TestPausingDeploymentUsingArs(t *testing.T) {
options.ReloadStrategy = constants.AnnotationsReloadStrategy
testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace)
}
func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) {
options.ReloadStrategy = reloadStrategy
envVarPostfix := constants.ConfigmapEnvVarPostfix
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com")
config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
collectors := getCollectors()
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
// Wait for deployment to have paused-at annotation
logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName)
err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second)
if err != nil {
t.Errorf("Failed to wait for deployment paused-at annotation: %v", err)
}
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
t.Errorf("Counter was not increased")
}
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 {
t.Errorf("Counter by namespace was not increased")
}
logrus.Infof("Verifying deployment has been paused")
items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
deploymentPaused, err := isDeploymentPaused(items, testName)
if err != nil {
t.Errorf("%s", err.Error())
}
if !deploymentPaused {
t.Errorf("Deployment has not been paused")
}
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com")
config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 {
t.Errorf("Counter was not increased")
}
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 {
t.Errorf("Counter by namespace was not increased")
}
logrus.Infof("Verifying deployment is still paused")
items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
deploymentPaused, err = isDeploymentPaused(items, testName)
if err != nil {
t.Errorf("%s", err.Error())
}
if !deploymentPaused {
t.Errorf("Deployment should still be paused")
}
logrus.Infof("Verifying deployment has been resumed after pause interval")
time.Sleep(11 * time.Second)
items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
deploymentPaused, err = isDeploymentPaused(items, testName)
if err != nil {
t.Errorf("%s", err.Error())
}
if deploymentPaused {
t.Errorf("Deployment should have been resumed after pause interval")
}
}
func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) {
deployment, err := FindDeploymentByName(deployments, deploymentName)
if err != nil {
return false, err
}
return IsPaused(deployment), nil
}
// waitForDeploymentPausedAtAnnotation waits for a deployment to have the paused-at annotation
func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error {
start := time.Now()
for time.Since(start) < timeout {
items := deploymentFuncs.ItemsFunc(clients, namespace)
deployment, err := FindDeploymentByName(items, deploymentName)
if err == nil {
annotations := deployment.GetAnnotations()
if annotations != nil {
if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists {
return nil
}
}
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName)
}
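For orientation, the pause tests above presuppose a Deployment that opts into pausing through annotations. A minimal sketch of such an object follows; the name, the helper, and the 10s period are illustrative assumptions, not values taken from the test fixtures.

import (
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pausedDeployment sketches a Deployment carrying the pause-period annotation;
// on a ConfigMap/Secret change Reloader pauses it, records the paused-at
// annotation, and resumes it once the period elapses.
func pausedDeployment(namespace string) *appsv1.Deployment {
    return &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "example-app", // illustrative
            Namespace: namespace,
            Annotations: map[string]string{
                "reloader.stakater.com/auto":                    "true",
                "deployment.reloader.stakater.com/pause-period": "10s", // assumed duration
            },
        },
    }
}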

View File

@@ -22,6 +22,8 @@ var (
SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
// ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps
ReloaderAutoAnnotation = "reloader.stakater.com/auto"
// IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps
IgnoreResourceAnnotation = "reloader.stakater.com/ignore"
// ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps
ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto"
// SecretReloaderAutoAnnotation is an annotation to detect changes in secrets
@@ -38,6 +40,12 @@ var (
SearchMatchAnnotation = "reloader.stakater.com/match"
// RolloutStrategyAnnotation is an annotation to define rollout update strategy
RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
// PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after
// a configmap/secret change has been detected. Valid values are described here: https://pkg.go.dev/time#ParseDuration
// only positive values are allowed
PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period"
// PauseDeploymentTimeAnnotation is an annotation set by Reloader to indicate when a deployment was paused
PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -55,6 +63,19 @@ var (
EnableHA = false
// Url to send a request to instead of triggering a reload
WebhookUrl = ""
// ResourcesToIgnore is a list of resources to ignore when watching for changes
ResourcesToIgnore = []string{}
// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
NamespacesToIgnore = []string{}
// NamespaceSelectors is a list of namespace selectors to watch for changes
NamespaceSelectors = []string{}
// ResourceSelectors is a list of resource selectors to watch for changes
ResourceSelectors = []string{}
// EnablePProf enables pprof for profiling
EnablePProf = false
// PProfAddr is the address to start pprof server on
// Default is :6060
PProfAddr = ":6060"
)
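As the comment on PauseDeploymentAnnotation above notes, the annotation value must be a Go duration string and only positive values are allowed. A hypothetical validation helper illustrating that contract (parsePausePeriod is not part of the change, just a sketch):

package main

import (
    "fmt"
    "time"
)

// parsePausePeriod validates a pause-period annotation value: any format accepted
// by time.ParseDuration (e.g. "30s", "5m"), rejecting zero and negative durations.
func parsePausePeriod(value string) (time.Duration, error) {
    d, err := time.ParseDuration(value)
    if err != nil {
        return 0, fmt.Errorf("invalid pause-period %q: %w", value, err)
    }
    if d <= 0 {
        return 0, fmt.Errorf("pause-period must be positive, got %s", d)
    }
    return d, nil
}

func main() {
    d, err := parsePausePeriod("5m")
    fmt.Println(d, err) // 5m0s <nil>
}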
func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {

View File

@@ -794,6 +794,26 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
return deployment, err
}
// CreateDeploymentWithAnnotations creates a deployment with additional annotations in given namespace and returns the Deployment
func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeployment(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
for annotationKey, annotationValue := range additionalAnnotations {
deploymentObj.Annotations[annotationKey] = annotationValue
}
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
logrus.Infof("Creating DeploymentConfig")
@@ -968,6 +988,22 @@ func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulse
return statefulsetError
}
// DeleteCronJob deletes a cronJob in given namespace and returns the error if any
func DeleteCronJob(client kubernetes.Interface, namespace string, cronJobName string) error {
logrus.Infof("Deleting CronJob %s", cronJobName)
cronJobError := client.BatchV1().CronJobs(namespace).Delete(context.TODO(), cronJobName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return cronJobError
}
// DeleteJob deletes a job in given namespace and returns the error if any
func DeleteJob(client kubernetes.Interface, namespace string, jobName string) error {
logrus.Infof("Deleting Job %s", jobName)
jobError := client.BatchV1().Jobs(namespace).Delete(context.TODO(), jobName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return jobError
}
// UpdateConfigMap updates a configmap in given namespace and returns the error if any
func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error {
logrus.Infof("Updating configmap %q.\n", configmapName)

View File

@@ -6,7 +6,7 @@ import (
v1 "k8s.io/api/core/v1"
)
// Config contains rolling upgrade configuration parameters
type Config struct {
Namespace string
ResourceName string

View File

@@ -3,11 +3,18 @@ package util
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"sort"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
// ConvertToEnvVarName converts the given text into a usable env var
@@ -54,8 +61,6 @@ func GetSHAfromSecret(data map[string][]byte) string {
type List []string
type Map map[string]string
func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {
@@ -64,3 +69,110 @@ func (l *List) Contains(s string) bool {
}
return false
}
func ConfigureReloaderFlags(cmd *cobra.Command) {
cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected")
cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to indicate when a deployment was paused by Reloader")
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling")
cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
}
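A rough wiring sketch for these flags from a caller package; the command name and Run body are illustrative, and the options globals are populated once Execute parses the flags:

cmd := &cobra.Command{
    Use: "reloader", // illustrative
    Run: func(cmd *cobra.Command, args []string) {
        // options.* globals now reflect the parsed flag values
    },
}
util.ConfigureReloaderFlags(cmd)
_ = cmd.Execute()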
func GetNamespaceLabelSelector() (string, error) {
slice := options.NamespaceSelectors
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
namespaceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(namespaceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return namespaceLabelSelector, nil
}
func GetResourceLabelSelector() (string, error) {
slice := options.ResourceSelectors
for i, kv := range slice {
// Legacy support for ":" as a delimiter and "*" for wildcard.
if strings.Contains(kv, ":") {
split := strings.Split(kv, ":")
if split[1] == "*" {
slice[i] = split[0]
} else {
slice[i] = split[0] + "=" + split[1]
}
}
// Convert wildcard to valid apimachinery operator
if strings.Contains(kv, "=") {
split := strings.Split(kv, "=")
if split[1] == "*" {
slice[i] = split[0]
}
}
}
resourceLabelSelector := strings.Join(slice[:], ",")
_, err := labels.Parse(resourceLabelSelector)
if err != nil {
logrus.Fatal(err)
}
return resourceLabelSelector, nil
}
func GetIgnoredResourcesList() (List, error) {
ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore")
for _, v := range ignoredResourcesList {
if v != "configMaps" && v != "secrets" {
return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
}
}
if len(ignoredResourcesList) > 1 {
return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
}
return ignoredResourcesList, nil
}
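To make the legacy selector handling above concrete: given --namespace-selector=env:prod,team=*,app=web, the normalization yields the apimachinery selector env=prod,team,app=web. A standalone sketch that mirrors (but does not replace) the rewrite performed by GetNamespaceLabelSelector and GetResourceLabelSelector:

package main

import (
    "fmt"
    "strings"
)

// normalizeSelectors mirrors the legacy ":"/"*" handling shown above.
func normalizeSelectors(selectors []string) string {
    for i, kv := range selectors {
        if strings.Contains(kv, ":") {
            parts := strings.SplitN(kv, ":", 2)
            if parts[1] == "*" {
                selectors[i] = parts[0] // wildcard becomes a key-only "exists" selector
            } else {
                selectors[i] = parts[0] + "=" + parts[1]
            }
        } else if strings.Contains(kv, "=") {
            parts := strings.SplitN(kv, "=", 2)
            if parts[1] == "*" {
                selectors[i] = parts[0]
            }
        }
    }
    return strings.Join(selectors, ",")
}

func main() {
    fmt.Println(normalizeSelectors([]string{"env:prod", "team=*", "app=web"}))
    // Output: env=prod,team,app=web
}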

274
pkg/common/common.go Normal file
View File

@@ -0,0 +1,274 @@
package common
import (
"context"
"os"
"regexp"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
type Map map[string]string
type ReloadCheckResult struct {
ShouldReload bool
AutoReload bool
}
// ReloaderOptions contains all configurable options for the Reloader controller.
// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets.
type ReloaderOptions struct {
// AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated
AutoReloadAll bool `json:"autoReloadAll"`
// ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name
ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"`
// SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name
SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"`
// ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets
ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"`
// IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched
IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"`
// ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only
ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"`
// SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only
SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"`
// ConfigmapExcludeReloaderAnnotation is the annotation key containing comma-separated list of ConfigMaps to exclude from watching
ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"`
// SecretExcludeReloaderAnnotation is the annotation key containing comma-separated list of Secrets to exclude from watching
SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"`
// AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with SearchMatchAnnotation
AutoSearchAnnotation string `json:"autoSearchAnnotation"`
// SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation
SearchMatchAnnotation string `json:"searchMatchAnnotation"`
// RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads
RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"`
// PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after
PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"`
// PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader
PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"`
// LogFormat specifies the log format to use (json, or empty string for default text format)
LogFormat string `json:"logFormat"`
// LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic)
LogLevel string `json:"logLevel"`
// IsArgoRollouts indicates whether support for Argo Rollouts is enabled
IsArgoRollouts bool `json:"isArgoRollouts"`
// ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations)
ReloadStrategy string `json:"reloadStrategy"`
// ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created
ReloadOnCreate bool `json:"reloadOnCreate"`
// ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted
ReloadOnDelete bool `json:"reloadOnDelete"`
// SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true)
SyncAfterRestart bool `json:"syncAfterRestart"`
// EnableHA indicates whether High Availability mode is enabled with leader election
EnableHA bool `json:"enableHA"`
// WebhookUrl is the URL to send webhook notifications to instead of performing reloads
WebhookUrl string `json:"webhookUrl"`
// ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
ResourcesToIgnore []string `json:"resourcesToIgnore"`
// NamespaceSelectors is a list of label selectors to filter namespaces to watch
NamespaceSelectors []string `json:"namespaceSelectors"`
// ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
ResourceSelectors []string `json:"resourceSelectors"`
// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
NamespacesToIgnore []string `json:"namespacesToIgnore"`
// EnablePProf enables pprof for profiling
EnablePProf bool `json:"enablePProf"`
// PProfAddr is the address to start pprof server on
PProfAddr string `json:"pprofAddr"`
}
var CommandLineOptions *ReloaderOptions
func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
namespace := os.Getenv("RELOADER_NAMESPACE")
if namespace == "" {
logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation")
return
}
metaInfo := &MetaInfo{
BuildInfo: *NewBuildInfo(),
ReloaderOptions: *GetCommandLineOptions(),
DeploymentInfo: metav1.ObjectMeta{
Name: os.Getenv("RELOADER_DEPLOYMENT_NAME"),
Namespace: namespace,
},
}
configMap := metaInfo.ToConfigMap()
if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil {
logrus.Info("Meta info configmap already exists, updating it")
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
if err != nil {
logrus.Warn("Failed to update existing meta info configmap: ", err)
}
return
}
_, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
if err != nil {
logrus.Warn("Failed to create meta info configmap: ", err)
}
}
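A minimal wiring sketch, assuming the standard k8s.io/client-go/rest and k8s.io/client-go/kubernetes imports and that RELOADER_NAMESPACE / RELOADER_DEPLOYMENT_NAME are injected into the pod (for example via the Downward API or the Helm chart):

restConfig, err := rest.InClusterConfig()
if err != nil {
    logrus.Fatal(err)
}
clientset := kubernetes.NewForConfigOrDie(restConfig)
// Creates the reloader-meta-info ConfigMap in RELOADER_NAMESPACE, or updates it if it
// already exists; logs a warning and returns if the namespace env var is unset.
common.PublishMetaInfoConfigmap(clientset)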
func ShouldReload(config util.Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
if resourceType == "Rollout" && !options.IsArgoRollouts {
return ReloadCheckResult{
ShouldReload: false,
}
}
ignoreResourceAnnotationValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation]
if ignoreResourceAnnotationValue == "true" {
return ReloadCheckResult{
ShouldReload: false,
}
}
annotationValue, found := annotations[config.Annotation]
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
annotations = podAnnotations
annotationValue = annotations[config.Annotation]
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
}
isResourceExcluded := false
switch config.Type {
case constants.ConfigmapEnvVarPostfix:
if foundExcludeConfigmap {
isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
}
case constants.SecretEnvVarPostfix:
if foundExcludeSecret {
isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
}
}
if isResourceExcluded {
return ReloadCheckResult{
ShouldReload: false,
}
}
reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
if reloaderEnabled || typedAutoAnnotationEnabled || (reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll) {
return ReloadCheckResult{
ShouldReload: true,
AutoReload: true,
}
}
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.TrimSpace(value)
re := regexp.MustCompile("^" + value + "$")
if re.Match([]byte(config.ResourceName)) {
return ReloadCheckResult{
ShouldReload: true,
AutoReload: false,
}
}
}
if searchAnnotationValue == "true" {
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
if matchAnnotationValue == "true" {
return ReloadCheckResult{
ShouldReload: true,
AutoReload: true,
}
}
}
return ReloadCheckResult{
ShouldReload: false,
}
}
func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
if excludedResources == "" {
return false
}
excludedResourcesList := strings.Split(excludedResources, ",")
for _, excludedResource := range excludedResourcesList {
if strings.TrimSpace(excludedResource) == resourceName {
return true
}
}
return false
}
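A hedged usage sketch of ShouldReload; the util.Config literal only assumes the fields the function dereferences above, and every value is illustrative:

cfg := util.Config{
    Namespace:           "default",
    ResourceName:        "app-config",
    Type:                constants.ConfigmapEnvVarPostfix,
    Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
    TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
    ResourceAnnotations: map[string]string{},
}
workloadAnnotations := Map{"reloader.stakater.com/auto": "true"}
result := ShouldReload(cfg, "Deployment", workloadAnnotations, nil, GetCommandLineOptions())
// result.ShouldReload == true and result.AutoReload == true for these values,
// because the workload's auto annotation is set to "true".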
func init() {
GetCommandLineOptions()
}
func GetCommandLineOptions() *ReloaderOptions {
if CommandLineOptions == nil {
CommandLineOptions = &ReloaderOptions{}
}
CommandLineOptions.AutoReloadAll = options.AutoReloadAll
CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation
CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation
CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation
CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation
CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation
CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation
CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation
CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation
CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation
CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation
CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation
CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation
CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation
CommandLineOptions.LogFormat = options.LogFormat
CommandLineOptions.LogLevel = options.LogLevel
CommandLineOptions.ReloadStrategy = options.ReloadStrategy
CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart
CommandLineOptions.EnableHA = options.EnableHA
CommandLineOptions.WebhookUrl = options.WebhookUrl
CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
CommandLineOptions.ResourceSelectors = options.ResourceSelectors
CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts)
CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
CommandLineOptions.EnablePProf = options.EnablePProf
return CommandLineOptions
}
func parseBool(value string) bool {
if value == "" {
return false
}
result, err := strconv.ParseBool(value)
if err != nil {
return false // Default to false if parsing fails
}
return result
}

129
pkg/common/metainfo.go Normal file
View File

@@ -0,0 +1,129 @@
package common
import (
"encoding/json"
"fmt"
"runtime"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Version, Commit, and BuildDate are set during the build process
// using the -X linker flag to inject these values into the binary.
// They provide metadata about the release version, commit hash, and build date,
// which is useful for debugging and tracking the specific build of the Reloader binary.
var Version = "dev"
var Commit = "unknown"
var BuildDate = "unknown"
const (
MetaInfoConfigmapName = "reloader-meta-info"
MetaInfoConfigmapLabelKey = "reloader.stakater.com/meta-info"
MetaInfoConfigmapLabelValue = "reloader-oss"
)
// MetaInfo contains comprehensive metadata about the Reloader instance.
// This includes build information, configuration options, and deployment details.
type MetaInfo struct {
// BuildInfo contains information about the build version, commit, and compilation details
BuildInfo BuildInfo `json:"buildInfo"`
// ReloaderOptions contains all the configuration options and flags used by this Reloader instance
ReloaderOptions ReloaderOptions `json:"reloaderOptions"`
// DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance
DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"`
}
// BuildInfo contains information about the build and version of the Reloader binary.
// This includes Go version, release version, commit details, and build timestamp.
type BuildInfo struct {
// GoVersion is the version of Go used to compile the binary
GoVersion string `json:"goVersion"`
// ReleaseVersion is the version tag or branch of the Reloader release
ReleaseVersion string `json:"releaseVersion"`
// CommitHash is the Git commit hash of the source code used to build this binary
CommitHash string `json:"commitHash"`
// CommitTime is the timestamp of the Git commit used to build this binary
CommitTime time.Time `json:"commitTime"`
}
func NewBuildInfo() *BuildInfo {
metaInfo := &BuildInfo{
GoVersion: runtime.Version(),
ReleaseVersion: Version,
CommitHash: Commit,
CommitTime: ParseUTCTime(BuildDate),
}
return metaInfo
}
func (m *MetaInfo) ToConfigMap() *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: MetaInfoConfigmapName,
Namespace: m.DeploymentInfo.Namespace,
Labels: map[string]string{
MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue,
},
},
Data: map[string]string{
"buildInfo": toJson(m.BuildInfo),
"reloaderOptions": toJson(m.ReloaderOptions),
"deploymentInfo": toJson(m.DeploymentInfo),
},
}
}
func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) {
var buildInfo BuildInfo
if val, ok := configmap.Data["buildInfo"]; ok {
err := json.Unmarshal([]byte(val), &buildInfo)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err)
}
}
var reloaderOptions ReloaderOptions
if val, ok := configmap.Data["reloaderOptions"]; ok {
err := json.Unmarshal([]byte(val), &reloaderOptions)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err)
}
}
var deploymentInfo metav1.ObjectMeta
if val, ok := configmap.Data["deploymentInfo"]; ok {
err := json.Unmarshal([]byte(val), &deploymentInfo)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err)
}
}
return &MetaInfo{
BuildInfo: buildInfo,
ReloaderOptions: reloaderOptions,
DeploymentInfo: deploymentInfo,
}, nil
}
func toJson(data interface{}) string {
jsonData, err := json.Marshal(data)
if err != nil {
return ""
}
return string(jsonData)
}
func ParseUTCTime(value string) time.Time {
if value == "" {
return time.Time{} // Return zero time if value is empty
}
t, err := time.Parse(time.RFC3339, value)
if err != nil {
return time.Time{} // Return zero time if parsing fails
}
return t
}
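To tie the pieces together, a round-trip sketch; the deployment name and namespace are assumptions, and in a real build Version, Commit, and BuildDate would be injected via the -X linker flag as described above:

meta := &MetaInfo{
    BuildInfo:       *NewBuildInfo(),
    ReloaderOptions: *GetCommandLineOptions(),
    DeploymentInfo:  metav1.ObjectMeta{Name: "reloader", Namespace: "reloader-system"}, // illustrative
}
cm := meta.ToConfigMap()         // named "reloader-meta-info", labelled reloader.stakater.com/meta-info=reloader-oss
restored, err := NewMetaInfo(cm) // unmarshals the three JSON payloads back into a MetaInfo
if err == nil {
    fmt.Println(restored.BuildInfo.ReleaseVersion)
}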