Mirror of https://github.com/stakater/Reloader.git
synced 2026-02-14 18:09:50 +00:00
Compare commits
26 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a43dcc7b85 | |
| | acaa00e256 | |
| | dffed992d6 | |
| | eff894e919 | |
| | 03c3f5947b | |
| | 1084574bd0 | |
| | 3103e5ac4d | |
| | a77c10a2c6 | |
| | bd767a7ef1 | |
| | 3a1cc8f348 | |
| | dd0807e951 | |
| | b8edc25177 | |
| | f9d658d3b4 | |
| | 816ad6d430 | |
| | 19a76258d0 | |
| | aa481d9568 | |
| | 177d2756a8 | |
| | 9b2af6f9b7 | |
| | 7c4899a7eb | |
| | 54d44858f8 | |
| | 6304a9e5ab | |
| | 1e6a6ec2d9 | |
| | 42cd7e71a2 | |
| | 1107fee109 | |
| | 9e33dac9ef | |
| | 517fd33fb1 | |
.github/workflows/push-helm-chart.yaml (vendored, 4 changes)
@@ -73,7 +73,7 @@ jobs:
           exit 1

       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.2
+        uses: sigstore/cosign-installer@v3.10.1

       - name: Login to GHCR Registry
         uses: docker/login-action@v3
@@ -106,7 +106,7 @@ jobs:
           commit_email: stakater@gmail.com

       - name: Push new chart tag
-        uses: anothrNick/github-tag-action@1.71.0
+        uses: anothrNick/github-tag-action@1.75.0
         env:
           GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
           WITH_V: false
.github/workflows/push.yaml (vendored, 2 changes)
@@ -215,7 +215,7 @@ jobs:
           org.opencontainers.image.revision=${{ github.sha }}

       - name: Push Latest Tag
-        uses: anothrNick/github-tag-action@1.71.0
+        uses: anothrNick/github-tag-action@1.75.0
         env:
           GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
           WITH_V: false
@@ -1,7 +1,7 @@
 StylesPath = styles
 MinAlertLevel = warning

-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.77/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.86/Stakater.zip
 Vocab = Stakater

 # Only check MarkDown files
@@ -1,3 +1,3 @@
 # Code of Conduct

-Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
@@ -2,7 +2,7 @@ ARG BUILDER_IMAGE
 ARG BASE_IMAGE

 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.4} AS builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.6} AS builder

 ARG TARGETOS
 ARG TARGETARCH
README.md (26 changes)
@@ -2,6 +2,7 @@
 <img src="assets/web/reloader.jpg" alt="Reloader" width="40%"/>
 </p>

+[](https://github.com/sponsors/stakater?utm_source=github&utm_medium=readme&utm_campaign=reloader)
 [](https://goreportcard.com/report/github.com/stakater/reloader)
 [](https://godoc.org/github.com/stakater/reloader)
 [](https://github.com/stakater/reloader/releases/latest)
@@ -328,13 +329,30 @@ Reloader supports multiple strategies for triggering rolling updates when a watc
 |------|-------------|
 | `--resources-to-ignore=configmaps` | Ignore ConfigMaps (only one type can be ignored at a time) |
 | `--resources-to-ignore=secrets` | Ignore Secrets (cannot combine with configMaps) |
+| `--ignored-workload-types=jobs,cronjobs` | Ignore specific workload types from reload monitoring |
 | `--resource-label-selector=key=value` | Only watch ConfigMaps/Secrets with matching labels |

-> **⚠️ Note:**
-> Only **one** resource type can be ignored at a time.
-> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
+> **⚠️ Note:**
+>
+> Only **one** resource type can be ignored at a time.
+> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
+> ✅ **Workaround:** Scale the Reloader deployment to `0` replicas if you want to disable it completely.
+
+**💡 Workload Type Examples:**
+
+```bash
+# Ignore only Jobs
+--ignored-workload-types=jobs
+
+# Ignore only CronJobs
+--ignored-workload-types=cronjobs
+
+# Ignore both (comma-separated)
+--ignored-workload-types=jobs,cronjobs
+```
+
+> **🔧 Use Case:** Ignoring workload types is useful when you don't want certain types of workloads to be automatically reloaded.

 #### 3. 🧩 Namespace Filtering

 | Flag | Description |
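The table and examples in the hunk above describe the raw CLI flags. As an illustrative sketch only (not part of the diff; the bare `reloader` invocation is an assumption), two of the filtering flags can be combined on a single command line:

```bash
# Skip Secrets entirely and keep Jobs/CronJobs out of reload monitoring
# (flag names come from the README table above)
reloader \
  --resources-to-ignore=secrets \
  --ignored-workload-types=jobs,cronjobs
```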
@@ -419,7 +437,7 @@ To make a GitHub release:
 1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
 1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
 1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
-1. Code owners create a PR to update the Helm chart version, example: [PR-846](https://github.com/stakater/Reloader/pull/846)
+1. Code owners create a PR with `release/helm-chart` label to update the Helm chart version, example: [PR-846](https://github.com/stakater/Reloader/pull/846)

 _Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
@@ -1,8 +1,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 2.2.0
-appVersion: v1.4.5
+version: 2.2.3
+appVersion: v1.4.8
 keywords:
   - Reloader
   - kubernetes
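Once this chart bump is published, the new versions can be confirmed against the chart repository; a small sketch (not part of the diff, the repository URL matches the installation docs below):

```bash
# List published Reloader chart versions together with their appVersion column
helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update
helm search repo stakater/reloader --versions | head
```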
@@ -5,6 +5,7 @@ If you have configured helm on your cluster, you can add Reloader to helm from o
 ## Installation

 ```bash
 # Add stakater helm repoository
 helm repo add stakater https://stakater.github.io/stakater-charts

 helm repo update
@@ -14,6 +15,8 @@ helm install stakater/reloader # For helm3 add --generate-name flag or set the r
 helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches in all namespaces. To watch in single namespace, set watchGlobally=false

 helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
+
+helm install stakater/reloader --set reloader.ignoreJobs=true --set reloader.ignoreCronJobs=true --generate-name # Install Reloader ignoring Jobs and CronJobs from reload monitoring
 ```

 ## Uninstalling
@@ -47,6 +50,8 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean | `false` |
 | `reloader.ignoreSecrets` | To ignore secrets. Valid value are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
 | `reloader.ignoreConfigMaps` | To ignore configmaps. Valid value are either `true` or `false` | boolean | `false` |
+| `reloader.ignoreJobs` | To ignore jobs from reload monitoring. Valid value are either `true` or `false`. Translates to `--ignored-workload-types=jobs` | boolean | `false` |
+| `reloader.ignoreCronJobs` | To ignore CronJobs from reload monitoring. Valid value are either `true` or `false`. Translates to `--ignored-workload-types=cronjobs` | boolean | `false` |
 | `reloader.reloadOnCreate` | Enable reload on create events. Valid value are either `true` or `false` | boolean | `false` |
 | `reloader.reloadOnDelete` | Enable reload on delete events. Valid value are either `true` or `false` | boolean | `false` |
 | `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid value are either `true` or `false` | boolean | `false` |
@@ -58,7 +63,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
 | `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
 | `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
-| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
+| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
 | `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
 | `reloader.legacy.rbac` | | boolean | `false` |
 | `reloader.matchLabels` | Pod labels to match | map | `{}` |
@@ -84,7 +89,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
 | `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
 | `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
 | `reloader.deployment.volumeMounts` | Mount volume | array | `[]` |
 | `reloader.deployment.volumes` | Add volume to a pod | array | `[]` |
+| `reloader.deployment.dnsConfig` | dns configuration for pods | map | `{}` |

 ### Other Reloader Parameters

 | Parameter | Description | Type | Default |
@@ -112,6 +120,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 - Only one of these resources can be ignored at a time:
   - `ignoreConfigMaps` **or** `ignoreSecrets`
   - Trying to ignore both will cause Helm template compilation errors
+- The `ignoreJobs` and `ignoreCronJobs` flags can be used together or individually
+  - When both are enabled, translates to `--ignored-workload-types=jobs,cronjobs`
+  - When used individually, translates to `--ignored-workload-types=jobs` or `--ignored-workload-types=cronjobs`
+  - These flags prevent Reloader from monitoring and reloading the specified workload types

 ### Special Integrations

 - OpenShift (`DeploymentConfig`) and Argo Rollouts support must be **explicitly enabled**
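To make the value-to-flag translation described in the notes above concrete, a short sketch (not part of the diff; the release name and namespace are placeholders):

```bash
# Enabling both chart values on install...
helm install reloader stakater/reloader -n reloader \
  --set reloader.ignoreJobs=true \
  --set reloader.ignoreCronJobs=true

# ...is documented to render a single combined container argument:
#   --ignored-workload-types=jobs,cronjobs
```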
@@ -71,6 +71,10 @@ spec:
       {{- if .Values.reloader.deployment.priorityClassName }}
       priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
       {{- end }}
+      {{- with .Values.reloader.deployment.dnsConfig }}
+      dnsConfig:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       containers:
       {{- if .Values.global.imageRegistry }}
         - image: "{{ .Values.global.imageRegistry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
@@ -151,7 +155,7 @@ spec:
           - name: RELOADER_DEPLOYMENT_NAME
             value: {{ template "reloader-fullname" . }}

           {{- if .Values.reloader.enableHA }}
           - name: POD_NAME
             valueFrom:
@@ -206,7 +210,7 @@ spec:
             {{- . | toYaml | nindent 10 }}
           {{- end }}
           {{- end }}
-          {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
+          {{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs)}}
           args:
             {{- if .Values.reloader.logFormat }}
             - "--log-format={{ .Values.reloader.logFormat }}"
@@ -220,6 +224,13 @@ spec:
             {{- if .Values.reloader.ignoreConfigMaps }}
             - "--resources-to-ignore=configMaps"
             {{- end }}
+            {{- if and (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) }}
+            - "--ignored-workload-types=jobs,cronjobs"
+            {{- else if .Values.reloader.ignoreJobs }}
+            - "--ignored-workload-types=jobs"
+            {{- else if .Values.reloader.ignoreCronJobs }}
+            - "--ignored-workload-types=cronjobs"
+            {{- end }}
             {{- if .Values.reloader.ignoreNamespaces }}
             - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
             {{- end }}
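The branching above can be checked by rendering the chart locally; a sketch (not part of the diff, the chart path is an assumption, adjust it to wherever the chart lives in your checkout):

```bash
# Render the deployment template with both ignore values enabled
helm template reloader ./deployments/kubernetes/chart/reloader \
  --set reloader.ignoreJobs=true \
  --set reloader.ignoreCronJobs=true \
  | grep -- '--ignored-workload-types'
# expected: - "--ignored-workload-types=jobs,cronjobs"
```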
@@ -263,6 +274,14 @@ spec:
             {{- if .Values.reloader.custom_annotations.match }}
             - "--search-match-annotation"
             - "{{ .Values.reloader.custom_annotations.match }}"
             {{- end }}
+            {{- if .Values.reloader.custom_annotations.pausePeriod }}
+            - "--pause-deployment-annotation"
+            - "{{ .Values.reloader.custom_annotations.pausePeriod }}"
+            {{- end }}
+            {{- if .Values.reloader.custom_annotations.pauseTime }}
+            - "--pause-deployment-annotation"
+            - "{{ .Values.reloader.custom_annotations.pauseTime }}"
+            {{- end }}
             {{- if .Values.reloader.webhookUrl }}
             - "--webhook-url"
@@ -61,3 +61,44 @@ tests:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+
+  - it: sets ignored-workload-types argument when ignoreJobs is true
+    set:
+      reloader:
+        ignoreJobs: true
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: "--ignored-workload-types=jobs"
+
+  - it: sets ignored-workload-types argument when ignoreCronJobs is true
+    set:
+      reloader:
+        ignoreCronJobs: true
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: "--ignored-workload-types=cronjobs"
+
+  - it: sets ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are true
+    set:
+      reloader:
+        ignoreJobs: true
+        ignoreCronJobs: true
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].args
+          content: "--ignored-workload-types=jobs,cronjobs"
+
+  - it: does not set ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are false
+    set:
+      reloader:
+        ignoreJobs: false
+        ignoreCronJobs: false
+    asserts:
+      - notContains:
+          path: spec.template.spec.containers[0].args
+          content: "--ignored-workload-types=jobs"
+      - notContains:
+          path: spec.template.spec.containers[0].args
+          content: "--ignored-workload-types=cronjobs"
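The assertions added above follow the helm-unittest format; as an illustrative sketch (not part of the diff; plugin installation and the chart path are assumptions), they can be exercised locally with:

```bash
# Install the helm-unittest plugin and run the chart's test suite
helm plugin install https://github.com/helm-unittest/helm-unittest
helm unittest ./deployments/kubernetes/chart/reloader
```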
@@ -17,7 +17,7 @@ fullnameOverride: ""
 image:
   name: stakater/reloader
   repository: ghcr.io/stakater/reloader
-  tag: v1.4.5
+  tag: v1.4.8
   # digest: sha256:1234567
   pullPolicy: IfNotPresent
@@ -27,7 +27,11 @@ reloader:
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  # Set to true to exclude Job workloads from automatic reload monitoring
+  # Useful when you don't want Jobs to be restarted when their referenced ConfigMaps/Secrets change
+  ignoreJobs: false
+  # Set to true to exclude CronJob workloads from automatic reload monitoring
+  # Useful when you don't want CronJobs to be restarted when their referenced ConfigMaps/Secrets change
+  ignoreCronJobs: false
   reloadOnCreate: false
   reloadOnDelete: false
@@ -53,6 +57,19 @@ reloader:
   # Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
   enableMetricsByNamespace: false
   deployment:
+    # Specifies the deployment DNS configuration.
+    dnsConfig: {}
+    # nameservers:
+    #   - 1.2.3.4
+    # searches:
+    #   - ns1.svc.cluster-domain.example
+    #   - my.dns.search.suffix
+    # options:
+    #   - name: ndots
+    #     value: "1"
+    #   - name: attempts
+    #     value: "3"
+
     # If you wish to run multiple replicas set reloader.enableHA = true
     replicas: 1
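A quick sketch of wiring the new `dnsConfig` value in practice (not part of the diff; the values-file name is a placeholder and the option mirrors the commented example above):

```bash
# Write a small override enabling one of the commented dnsConfig options
cat > reloader-dns-values.yaml <<'EOF'
reloader:
  deployment:
    dnsConfig:
      options:
        - name: ndots
          value: "1"
EOF

helm upgrade --install reloader stakater/reloader -f reloader-dns-values.yaml
```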
@@ -72,6 +89,9 @@ reloader:
     #     operator: "Exists"
     affinity: {}

+    volumeMounts: []
+    volumes: []
+
     securityContext:
       runAsNonRoot: true
       runAsUser: 65534
@@ -110,7 +130,7 @@ reloader:
   labels:
     provider: stakater
     group: com.stakater.platform
-    version: v1.4.5
+    version: v1.4.8
   # Support for extra environment variables.
   env:
     # Open supports Key value pair as environment variables.
@@ -341,8 +361,4 @@ reloader:
     # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
     updateMode: Auto

-  volumeMounts: []
-
-  volumes: []
-
   webhookUrl: ""
@@ -10,3 +10,17 @@ These are the key features of Reloader:
 1. Restart pod in a `rollout` on change in linked/related `ConfigMaps` or `Secrets`

 This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md).
+
+---
+
+<div align="center">
+
+[](https://github.com/sponsors/stakater?utm_source=docs&utm_medium=footer&utm_campaign=reloader)
+
+<p>
+Your support funds maintenance, security updates, and new features for Reloader, plus continued investment in other open source tools.
+</p>
+
+</div>
+
+---
go.mod (2 changes)
@@ -1,6 +1,6 @@
 module github.com/stakater/Reloader

-go 1.24.4
+go 1.24.6

 require (
 	github.com/argoproj/argo-rollouts v1.8.2
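For reference, a minimal sketch of how such a Go toolchain bump is typically applied locally (not part of the diff):

```bash
# Bump the go directive and tidy the module graph
go mod edit -go=1.24.6
go mod tidy
```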
@@ -133,13 +133,13 @@ func startReloader(cmd *cobra.Command, args []string) {
 	namespaceLabelSelector := ""

 	if isGlobal {
-		namespaceLabelSelector, err = util.GetNamespaceLabelSelector()
+		namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors)
 		if err != nil {
 			logrus.Fatal(err)
 		}
 	}

-	resourceLabelSelector, err := util.GetResourceLabelSelector()
+	resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors)
 	if err != nil {
 		logrus.Fatal(err)
 	}
@@ -15,6 +15,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/testutil"
 	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	"github.com/stakater/Reloader/pkg/kube"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -94,7 +95,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -147,7 +148,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -212,7 +213,7 @@ func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -271,7 +272,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -326,7 +327,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationIn
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -390,7 +391,7 @@ func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -443,7 +444,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -501,7 +502,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -552,7 +553,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -604,7 +605,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *test
 	// Verifying DaemonSet update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -667,7 +668,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.
 	// Verifying DaemonSet update
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -720,7 +721,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -779,7 +780,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -830,7 +831,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDae
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -882,7 +883,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *te
 	// Verifying StatefulSet update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -941,7 +942,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testin
 	// Verifying StatefulSet update
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -994,7 +995,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1046,7 +1047,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
 	// Verifying deployment update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1099,7 +1100,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
 	// Verifying deployment update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1164,7 +1165,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
 	// Verifying deployment update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1223,7 +1224,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing
 	// Verifying deployment update
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1278,7 +1279,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
 	// Verifying deployment update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1342,7 +1343,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1395,7 +1396,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1453,7 +1454,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1504,7 +1505,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1556,7 +1557,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
 	// Verifying DaemonSet update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1619,7 +1620,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.
 	// Verifying DaemonSet update
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1672,7 +1673,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1731,7 +1732,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1782,7 +1783,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -1834,7 +1835,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
 	// Verifying StatefulSet update
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1893,7 +1894,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testin
 	// Verifying StatefulSet update
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -1946,7 +1947,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -2004,7 +2005,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
 	// Verifying Upgrade
 	logrus.Infof("Verifying env var has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -2062,7 +2063,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testi
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -4,7 +4,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
-	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"
 )
@@ -33,13 +33,13 @@ func (r ResourceCreatedHandler) Handle() error {
 }

 // GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
+func (r ResourceCreatedHandler) GetConfig() (common.Config, string) {
 	var oldSHAData string
-	var config util.Config
+	var config common.Config
 	if _, ok := r.Resource.(*v1.ConfigMap); ok {
-		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
 	} else if _, ok := r.Resource.(*v1.Secret); ok {
-		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
 	} else {
 		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
 	}
@@ -10,7 +10,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/testutil"
-	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -42,20 +42,20 @@ func (r ResourceDeleteHandler) Handle() error {
 }

 // GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
+func (r ResourceDeleteHandler) GetConfig() (common.Config, string) {
 	var oldSHAData string
-	var config util.Config
+	var config common.Config
 	if _, ok := r.Resource.(*v1.ConfigMap); ok {
-		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
 	} else if _, ok := r.Resource.(*v1.Secret); ok {
-		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
 	} else {
 		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
 	}
 	return config, oldSHAData
 }

-func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
 		return removePodAnnotations(upgradeFuncs, item, config, autoReload)
 	}
@@ -63,12 +63,12 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
 	return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
 }

-func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	config.SHAValue = testutil.GetSHAfromEmptyData()
 	return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
 }

-func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	envVar := getEnvVarName(config.ResourceName, config.Type)
 	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
@@ -1,11 +1,9 @@
 package handler

-import (
-	"github.com/stakater/Reloader/internal/pkg/util"
-)
+import "github.com/stakater/Reloader/pkg/common"

 // ResourceHandler handles the creation and update of resources
 type ResourceHandler interface {
 	Handle() error
-	GetConfig() (util.Config, string)
+	GetConfig() (common.Config, string)
 }
@@ -5,6 +5,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"
 )
@@ -36,15 +37,15 @@ func (r ResourceUpdatedHandler) Handle() error {
 }

 // GetConfig gets configurations containing SHA, annotations, namespace and resource name
-func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
+func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
 	var oldSHAData string
-	var config util.Config
+	var config common.Config
 	if _, ok := r.Resource.(*v1.ConfigMap); ok {
 		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
-		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
+		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
 	} else if _, ok := r.Resource.(*v1.Secret); ok {
 		oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
-		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
+		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
 	} else {
 		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
 	}
@@ -138,7 +138,7 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	}
 }

-func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
+func sendUpgradeWebhook(config common.Config, webhookUrl string) error {
 	logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
 		config.ResourceName, config.Type, config.Namespace, webhookUrl)
@@ -169,21 +169,37 @@ func sendWebhook(url string) (string, []error) {
 	return buffer.String(), nil
 }

-func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
+func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
 	clients := kube.GetClients()

-	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
+	// Get ignored workload types to avoid listing resources without RBAC permissions
+	ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
+	if err != nil {
+		logrus.Errorf("Failed to parse ignored workload types: %v", err)
+		ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails
+	}
+
+	err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
 	if err != nil {
 		return err
 	}
-	err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
-	if err != nil {
-		return err
+
+	// Only process CronJobs if they are not ignored
+	if !ignoredWorkloadTypes.Contains("cronjobs") {
+		err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
+		if err != nil {
+			return err
+		}
 	}
-	err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
-	if err != nil {
-		return err
+
+	// Only process Jobs if they are not ignored
+	if !ignoredWorkloadTypes.Contains("jobs") {
+		err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
+		if err != nil {
+			return err
+		}
 	}
+
 	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
 	if err != nil {
 		return err
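The comment in the hunk above ties the skip logic to RBAC: if Reloader's service account is not permitted to list a workload type, ignoring that type avoids the failing list calls. An illustrative check (not part of the diff; namespace and service-account names are placeholders):

```bash
# Verify whether the Reloader service account may list CronJobs at all
kubectl auth can-i list cronjobs \
  --as=system:serviceaccount:reloader:reloader-reloader
```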
@@ -203,7 +219,7 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
 	return nil
 }

-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
+func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
 	err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
 	if err != nil {
 		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
@@ -212,7 +228,7 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
 }

 // PerformAction invokes the deployment if there is any change in configmap or secret data
-func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
+func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
 	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)

 	for _, item := range items {
@@ -249,7 +265,7 @@ func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
 	return err
 }

-func upgradeResource(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
+func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
 	accessor, err := meta.Accessor(resource)
 	if err != nil {
 		return err
@@ -403,7 +419,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
 	return nil
 }

-func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
+func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container {
 	volumes := upgradeFuncs.VolumesFunc(item)
 	containers := upgradeFuncs.ContainersFunc(item)
 	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -417,7 +433,11 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
 		container = getContainerWithVolumeMount(initContainers, volumeMountName)
 		if container != nil {
 			// if configmap/secret is being used in init container then return the first Pod container to save reloader env
-			return &containers[0]
+			if len(containers) > 0 {
+				return &containers[0]
+			}
+			// No containers available, return nil to avoid crash
+			return nil
 		}
 	} else if container != nil {
 		return container
@@ -430,13 +450,21 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
 		container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
 		if container != nil {
 			// if configmap/secret is being used in init container then return the first Pod container to save reloader env
-			return &containers[0]
+			if len(containers) > 0 {
+				return &containers[0]
+			}
+			// No containers available, return nil to avoid crash
+			return nil
 		}
 	}

 	// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
 	if container == nil && !autoReload {
-		return &containers[0]
+		if len(containers) > 0 {
+			return &containers[0]
+		}
+		// No containers available, return nil to avoid crash
+		return nil
 	}

 	return container
@@ -452,16 +480,16 @@ type InvokeStrategyResult struct {
 	Patch *Patch
 }

-type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult
+type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult

-func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
 		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
 	}
 	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
 }

-func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
 	if container == nil {
 		return InvokeStrategyResult{constants.NoContainerFound, nil}
@@ -469,7 +497,7 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
 	// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
 	// Note: the data on this struct is purely informational and is not used for future updates
-	reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
+	reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name})
 	annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
 	if err != nil {
 		logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
@@ -496,7 +524,7 @@ func getReloaderAnnotationKey() string {
 	)
 }

-func createReloadedAnnotations(target *util.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
+func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
 	if target == nil {
 		return nil, nil, errors.New("target is required")
 	}
@@ -531,7 +559,7 @@ func getEnvVarName(resourceName string, typeName string) string {
 	return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
 }

-func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
+func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
 	envVar := getEnvVarName(config.ResourceName, config.Type)
 	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"

 	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+	"github.com/prometheus/client_golang/prometheus"
 	promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/sirupsen/logrus"
@@ -16,8 +17,10 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/testutil"
 	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	"github.com/stakater/Reloader/pkg/kube"
+	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -672,7 +675,7 @@ func teardownArs() {
 		logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
 	}

-	// Deleting Deployment with pasuse annotation
+	// Deleting Deployment with pause annotation
 	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
 	if deploymentError != nil {
 		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
@@ -708,7 +711,7 @@ func teardownArs() {
 		logrus.Errorf("Error while deleting the configmap %v", err)
 	}

-	// Deleting Configmap used projected volume in init containers
+	// Deleting secret used in projected volume in init containers
 	err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer)
 	if err != nil {
 		logrus.Errorf("Error while deleting the secret %v", err)
@@ -1392,7 +1395,7 @@ func teardownErs() {
 		logrus.Errorf("Error while deleting the configmap %v", err)
 	}

-	// Deleting Configmap used projected volume in init containers
+	// Deleting secret used in projected volume in init containers
 	err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer)
 	if err != nil {
 		logrus.Errorf("Error while deleting the secret %v", err)
@@ -1475,7 +1478,7 @@ func teardownErs() {
 		logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err)
 	}

-	// Deleting ConfigMap for testins pausing deployments
+	// Deleting ConfigMap for testing pausing deployments
 	err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
 	if err != nil {
 		logrus.Errorf("Error while deleting the configmap: %v", err)
@@ -1486,13 +1489,13 @@ func teardownErs() {

 }

-func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) util.Config {
+func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config {
 	ns := ersNamespace
 	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
 		ns = arsNamespace
 	}

-	return util.Config{
+	return common.Config{
 		Namespace:    ns,
 		ResourceName: name,
 		SHAValue:     shaData,
@@ -1509,7 +1512,7 @@ func getCollectors() metrics.Collectors {
 var labelSucceeded = prometheus.Labels{"success": "true"}
 var labelFailed = prometheus.Labels{"success": "false"}

-func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
 	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
 	time.Sleep(5 * time.Second)
 	if err != nil {
@@ -1527,7 +1530,7 @@ func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Client
 	}
 }

-func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
+func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
 	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
 	upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
 		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
@@ -2247,7 +2250,7 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testi
|
||||
logrus.Infof("Verifying deployment did not update")
|
||||
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
|
||||
if updated {
|
||||
t.Errorf("Deployment which had to be exluded was updated")
|
||||
t.Errorf("Deployment which had to be excluded was updated")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2907,7 +2910,7 @@ func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
|
||||
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
|
||||
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
@@ -2924,7 +2927,7 @@ func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Client
|
||||
}
|
||||
}
|
||||
|
||||
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
|
||||
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
|
||||
assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate)
|
||||
|
||||
err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
|
||||
@@ -4214,3 +4217,72 @@ func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs c
|
||||
|
||||
return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName)
|
||||
}
|
||||
|
||||
// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers
|
||||
// This simulates the scenario where Argo Rollouts with workloadRef return empty containers
|
||||
func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object {
|
||||
rollout := &argorolloutv1alpha1.Rollout{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: argorolloutv1alpha1.RolloutSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{}, // Empty containers slice
|
||||
InitContainers: []v1.Container{}, // Empty init containers slice
|
||||
Volumes: []v1.Volume{}, // Empty volumes slice
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var obj runtime.Object = rollout
|
||||
return &obj
|
||||
}
|
||||
|
||||
// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions
|
||||
func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) {
|
||||
namespace := "test-namespace"
|
||||
resourceName := "test-configmap"
|
||||
|
||||
// Use real Argo Rollout functions but mock the containers function
|
||||
rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs()
|
||||
originalContainersFunc := rolloutFuncs.ContainersFunc
|
||||
originalInitContainersFunc := rolloutFuncs.InitContainersFunc
|
||||
|
||||
// Override to return empty containers (simulating workloadRef scenario)
|
||||
rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container {
|
||||
return []v1.Container{} // Empty like workloadRef rollouts
|
||||
}
|
||||
rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container {
|
||||
return []v1.Container{} // Empty like workloadRef rollouts
|
||||
}
|
||||
|
||||
// Restore original functions after test
|
||||
defer func() {
|
||||
rolloutFuncs.ContainersFunc = originalContainersFunc
|
||||
rolloutFuncs.InitContainersFunc = originalInitContainersFunc
|
||||
}()
|
||||
|
||||
// Use proper Argo Rollout object instead of Pod
|
||||
mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout")
|
||||
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: resourceName,
|
||||
Type: constants.ConfigmapEnvVarPostfix,
|
||||
SHAValue: "test-sha",
|
||||
}
|
||||
|
||||
// Test both autoReload scenarios using subtests as suggested by Felix
|
||||
for _, autoReload := range []bool{true, false} {
|
||||
t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) {
|
||||
// This tests the actual fix in the context of Argo Rollouts
|
||||
result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload)
|
||||
|
||||
if result != nil {
|
||||
t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
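The test above only asserts that `getContainerUsingResource` returns nil, rather than panicking, when a Rollout exposes no containers (the workloadRef case). As a rough illustration of the kind of guard being exercised, the sketch below shows one way such a lookup can bail out early. The package name, the helper name `findContainer`, and the exact matching logic are assumptions for illustration, not Reloader's actual implementation.

```go
package handler // assumed package for this sketch

import (
	v1 "k8s.io/api/core/v1"
)

// findContainer is an illustrative sketch of a container lookup that tolerates
// workloads (for example Argo Rollouts using workloadRef) whose pod template
// carries no containers at all.
func findContainer(containers []v1.Container, resourceName string, autoReload bool) *v1.Container {
	if len(containers) == 0 {
		// Nothing to inspect; callers must treat nil as "no container found"
		// instead of indexing into an empty slice.
		return nil
	}
	if autoReload {
		// With auto-reload, the first container is a reasonable default target.
		return &containers[0]
	}
	for i := range containers {
		for _, env := range containers[i].EnvFrom {
			if env.ConfigMapRef != nil && env.ConfigMapRef.Name == resourceName {
				return &containers[i]
			}
		}
	}
	return nil
}
```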
@@ -16,7 +16,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
)

@@ -159,7 +159,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying pod envvars has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -186,7 +186,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
	// Verifying that the deployment was not updated as leadership has been lost
	logrus.Infof("Verifying pod envvars has not been updated")
	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
	config = util.Config{
	config = common.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,

@@ -65,6 +65,8 @@ var (
	WebhookUrl = ""
	// ResourcesToIgnore is a list of resources to ignore when watching for changes
	ResourcesToIgnore = []string{}
	// WorkloadTypesToIgnore is a list of workload types to ignore when watching for changes
	WorkloadTypesToIgnore = []string{}
	// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
	NamespacesToIgnore = []string{}
	// NamespaceSelectors is a list of namespace selectors to watch for changes

@@ -21,6 +21,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
@@ -733,7 +734,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
		return ""
	}

	var last util.ReloadSource
	var last common.ReloadSource
	bytes := []byte(annotationJson)
	err := json.Unmarshal(bytes, &last)
	if err != nil {
@@ -1058,7 +1059,7 @@ func RandSeq(n int) string {
}

// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1104,7 +1105,7 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
}

// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all Envvars SKAKATER_name_CONFIGMAP/SECRET are removed
func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1153,7 +1154,7 @@ func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVa
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)

@@ -8,13 +8,11 @@ import (
	"sort"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/options"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -85,6 +83,7 @@ func ConfigureReloaderFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
	cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
	cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)")
	cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
	cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
	cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
@@ -98,68 +97,6 @@ func ConfigureReloaderFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
}

func GetNamespaceLabelSelector() (string, error) {
	slice := options.NamespaceSelectors

	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	namespaceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(namespaceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return namespaceLabelSelector, nil
}

func GetResourceLabelSelector() (string, error) {
	slice := options.ResourceSelectors

	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	resourceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(resourceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return resourceLabelSelector, nil
}

func GetIgnoredResourcesList() (List, error) {

	ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore")
@@ -176,3 +113,16 @@ func GetIgnoredResourcesList() (List, error) {

	return ignoredResourcesList, nil
}

func GetIgnoredWorkloadTypesList() (List, error) {

	ignoredWorkloadTypesList := options.WorkloadTypesToIgnore

	for _, v := range ignoredWorkloadTypesList {
		if v != "jobs" && v != "cronjobs" {
			return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v)
		}
	}

	return ignoredWorkloadTypesList, nil
}

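For context, here is a hedged sketch of how this validation might be wired up at startup: parse the `--ignored-workload-types` values once, fail fast on anything other than `jobs`/`cronjobs`, and keep the resulting list around for later `Contains` checks. The wrapper function is hypothetical; only `GetIgnoredWorkloadTypesList` and `List.Contains` come from the change above.

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/util"
)

// validateIgnoredWorkloadTypes is a hypothetical startup helper; only
// util.GetIgnoredWorkloadTypesList and List.Contains come from the change above.
func validateIgnoredWorkloadTypes() error {
	ignored, err := util.GetIgnoredWorkloadTypesList()
	if err != nil {
		// e.g. --ignored-workload-types=deployments fails validation here
		return fmt.Errorf("invalid --ignored-workload-types value: %w", err)
	}
	if ignored.Contains("jobs") {
		logrus.Info("Job workloads will be skipped by Reloader")
	}
	if ignored.Contains("cronjobs") {
		logrus.Info("CronJob workloads will be skipped by Reloader")
	}
	return nil
}
```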
@@ -3,6 +3,7 @@ package util
import (
	"testing"

	"github.com/stakater/Reloader/internal/pkg/options"
	v1 "k8s.io/api/core/v1"
)

@@ -45,3 +46,141 @@ func TestGetHashFromConfigMap(t *testing.T) {
		}
	}
}

func TestGetIgnoredWorkloadTypesList(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name          string
		workloadTypes []string
		expectError   bool
		expected      []string
	}{
		{
			name:          "Both jobs and cronjobs",
			workloadTypes: []string{"jobs", "cronjobs"},
			expectError:   false,
			expected:      []string{"jobs", "cronjobs"},
		},
		{
			name:          "Only jobs",
			workloadTypes: []string{"jobs"},
			expectError:   false,
			expected:      []string{"jobs"},
		},
		{
			name:          "Only cronjobs",
			workloadTypes: []string{"cronjobs"},
			expectError:   false,
			expected:      []string{"cronjobs"},
		},
		{
			name:          "Empty list",
			workloadTypes: []string{},
			expectError:   false,
			expected:      []string{},
		},
		{
			name:          "Invalid workload type",
			workloadTypes: []string{"invalid"},
			expectError:   true,
			expected:      nil,
		},
		{
			name:          "Mixed valid and invalid",
			workloadTypes: []string{"jobs", "invalid"},
			expectError:   true,
			expected:      nil,
		},
		{
			name:          "Duplicate values",
			workloadTypes: []string{"jobs", "jobs"},
			expectError:   false,
			expected:      []string{"jobs", "jobs"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the global option
			options.WorkloadTypesToIgnore = tt.workloadTypes

			result, err := GetIgnoredWorkloadTypesList()

			if tt.expectError && err == nil {
				t.Errorf("Expected error but got none")
			}

			if !tt.expectError && err != nil {
				t.Errorf("Expected no error but got: %v", err)
			}

			if !tt.expectError {
				if len(result) != len(tt.expected) {
					t.Errorf("Expected %v, got %v", tt.expected, result)
					return
				}

				for i, expected := range tt.expected {
					if i >= len(result) || result[i] != expected {
						t.Errorf("Expected %v, got %v", tt.expected, result)
						break
					}
				}
			}
		})
	}
}

func TestListContains(t *testing.T) {
	tests := []struct {
		name     string
		list     List
		item     string
		expected bool
	}{
		{
			name:     "List contains item",
			list:     List{"jobs", "cronjobs"},
			item:     "jobs",
			expected: true,
		},
		{
			name:     "List does not contain item",
			list:     List{"jobs"},
			item:     "cronjobs",
			expected: false,
		},
		{
			name:     "Empty list",
			list:     List{},
			item:     "jobs",
			expected: false,
		},
		{
			name:     "Case sensitive matching",
			list:     List{"jobs", "cronjobs"},
			item:     "Jobs",
			expected: false,
		},
		{
			name:     "Multiple occurrences",
			list:     List{"jobs", "jobs", "cronjobs"},
			item:     "jobs",
			expected: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.list.Contains(tt.item)
			if result != tt.expected {
				t.Errorf("Expected %v, got %v", tt.expected, result)
			}
		})
	}
}

@@ -12,6 +12,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

@@ -74,6 +75,8 @@ type ReloaderOptions struct {
	WebhookUrl string `json:"webhookUrl"`
	// ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
	ResourcesToIgnore []string `json:"resourcesToIgnore"`
	// WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs")
	WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"`
	// NamespaceSelectors is a list of label selectors to filter namespaces to watch
	NamespaceSelectors []string `json:"namespaceSelectors"`
	// ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
@@ -121,11 +124,90 @@ func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
	}
}

func ShouldReload(config util.Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
func GetNamespaceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	if resourceType == "Rollout" && !options.IsArgoRollouts {
		return ReloadCheckResult{
			ShouldReload: false,
	namespaceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(namespaceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return namespaceLabelSelector, nil
}

func GetResourceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	resourceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(resourceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return resourceLabelSelector, nil
}

// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options.
func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {

	// Check if this workload type should be ignored
	if len(options.WorkloadTypesToIgnore) > 0 {
		ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
		if err != nil {
			logrus.Errorf("Failed to parse ignored workload types: %v", err)
		} else {
			// Map Kubernetes resource types to CLI-friendly names for comparison
			var resourceToCheck string
			switch resourceType {
			case "Job":
				resourceToCheck = "jobs"
			case "CronJob":
				resourceToCheck = "cronjobs"
			default:
				resourceToCheck = resourceType // For other types, use as-is
			}

			// Check if current resource type should be ignored
			if ignoredWorkloadTypes.Contains(resourceToCheck) {
				return ReloadCheckResult{
					ShouldReload: false,
				}
			}
		}
	}

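Read together with the flag handling above, the effect is that callers consult `ShouldReload` before doing any work against a workload, so an ignored Job or CronJob is filtered out before Reloader ever needs list or patch permissions on it. A rough sketch of such a call site follows; only `common.ShouldReload` and the `common` types come from this change, the surrounding function and the `upgrade` callback are assumptions.

```go
package main

import (
	"github.com/stakater/Reloader/pkg/common"
)

// handleWorkload is an illustrative call site, not Reloader's actual handler.
func handleWorkload(cfg common.Config, resourceType string, annotations, podAnnotations common.Map,
	opts *common.ReloaderOptions, upgrade func() error) error {
	result := common.ShouldReload(cfg, resourceType, annotations, podAnnotations, opts)
	if !result.ShouldReload {
		// Ignored workload types (e.g. --ignored-workload-types=jobs,cronjobs)
		// never reach the upgrade path, so no Job/CronJob API calls are made.
		return nil
	}
	return upgrade()
}
```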
@@ -251,6 +333,7 @@ func GetCommandLineOptions() *ReloaderOptions {
	CommandLineOptions.EnableHA = options.EnableHA
	CommandLineOptions.WebhookUrl = options.WebhookUrl
	CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
	CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore
	CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
	CommandLineOptions.ResourceSelectors = options.ResourceSelectors
	CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
@@ -258,6 +341,7 @@ func GetCommandLineOptions() *ReloaderOptions {
	CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
	CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
	CommandLineOptions.EnablePProf = options.EnablePProf
	CommandLineOptions.PProfAddr = options.PProfAddr

	return CommandLineOptions
}

224
pkg/common/common_test.go
Normal file
@@ -0,0 +1,224 @@
package common

import (
	"testing"

	"github.com/stakater/Reloader/internal/pkg/options"
)

func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name                 string
		ignoredWorkloadTypes []string
		resourceType         string
		shouldReload         bool
		description          string
	}{
		{
			name:                 "Jobs ignored - Job should not reload",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "Job",
			shouldReload:         false,
			description:          "When jobs are ignored, Job resources should not be reloaded",
		},
		{
			name:                 "Jobs ignored - CronJob should reload",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "CronJob",
			shouldReload:         true,
			description:          "When jobs are ignored, CronJob resources should still be processed",
		},
		{
			name:                 "CronJobs ignored - CronJob should not reload",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "CronJob",
			shouldReload:         false,
			description:          "When cronjobs are ignored, CronJob resources should not be reloaded",
		},
		{
			name:                 "CronJobs ignored - Job should reload",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "Job",
			shouldReload:         true,
			description:          "When cronjobs are ignored, Job resources should still be processed",
		},
		{
			name:                 "Both ignored - Job should not reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Job",
			shouldReload:         false,
			description:          "When both are ignored, Job resources should not be reloaded",
		},
		{
			name:                 "Both ignored - CronJob should not reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "CronJob",
			shouldReload:         false,
			description:          "When both are ignored, CronJob resources should not be reloaded",
		},
		{
			name:                 "Both ignored - Deployment should reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Deployment",
			shouldReload:         true,
			description:          "When both are ignored, other workload types should still be processed",
		},
		{
			name:                 "None ignored - Job should reload",
			ignoredWorkloadTypes: []string{},
			resourceType:         "Job",
			shouldReload:         true,
			description:          "When nothing is ignored, all workload types should be processed",
		},
		{
			name:                 "None ignored - CronJob should reload",
			ignoredWorkloadTypes: []string{},
			resourceType:         "CronJob",
			shouldReload:         true,
			description:          "When nothing is ignored, all workload types should be processed",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the ignored workload types
			options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes

			// Create minimal test config and options
			config := Config{
				ResourceName: "test-resource",
				Annotation:   "configmap.reloader.stakater.com/reload",
			}

			annotations := Map{
				"configmap.reloader.stakater.com/reload": "test-config",
			}

			// Create ReloaderOptions with the ignored workload types
			opts := &ReloaderOptions{
				WorkloadTypesToIgnore:  tt.ignoredWorkloadTypes,
				AutoReloadAll:          true, // Enable auto-reload to simplify test
				ReloaderAutoAnnotation: "reloader.stakater.com/auto",
			}

			// Call ShouldReload
			result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)

			// Check the result
			if result.ShouldReload != tt.shouldReload {
				t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v",
					tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload)
			}

			t.Logf("✓ %s", tt.description)
		})
	}
}

func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	// Test with invalid workload type - should still continue processing
	options.WorkloadTypesToIgnore = []string{"invalid"}

	config := Config{
		ResourceName: "test-resource",
		Annotation:   "configmap.reloader.stakater.com/reload",
	}

	annotations := Map{
		"configmap.reloader.stakater.com/reload": "test-config",
	}

	opts := &ReloaderOptions{
		WorkloadTypesToIgnore:  []string{"invalid"},
		AutoReloadAll:          true, // Enable auto-reload to simplify test
		ReloaderAutoAnnotation: "reloader.stakater.com/auto",
	}

	// Should not panic and should continue with normal processing
	result := ShouldReload(config, "Job", annotations, Map{}, opts)

	// Since validation failed, it should continue with normal processing (should reload)
	if !result.ShouldReload {
		t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload)
	}
}

// Test that validates the fix for issue #996
func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name                 string
		ignoredWorkloadTypes []string
		resourceType         string
		description          string
	}{
		{
			name:                 "Issue #996 - ignoreJobs prevents Job processing",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "Job",
			description:          "Job resources are skipped entirely, preventing RBAC permission errors",
		},
		{
			name:                 "Issue #996 - ignoreCronJobs prevents CronJob processing",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "CronJob",
			description:          "CronJob resources are skipped entirely, preventing RBAC permission errors",
		},
		{
			name:                 "Issue #996 - both ignored prevent both types",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Job",
			description:          "Job resources are skipped entirely when both types are ignored",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the ignored workload types
			options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes

			config := Config{
				ResourceName: "test-resource",
				Annotation:   "configmap.reloader.stakater.com/reload",
			}

			annotations := Map{
				"configmap.reloader.stakater.com/reload": "test-config",
			}

			opts := &ReloaderOptions{
				WorkloadTypesToIgnore:  tt.ignoredWorkloadTypes,
				AutoReloadAll:          true, // Enable auto-reload to simplify test
				ReloaderAutoAnnotation: "reloader.stakater.com/auto",
			}

			// Call ShouldReload
			result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)

			// Should not reload when workload type is ignored
			if result.ShouldReload {
				t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v",
					tt.resourceType, result.ShouldReload)
			}

			t.Logf("✓ %s", tt.description)
		})
	}
}
@@ -1,8 +1,9 @@
package util
package common

import (
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
)

@@ -15,6 +16,7 @@ type Config struct {
	TypedAutoAnnotation string
	SHAValue            string
	Type                string
	Labels              map[string]string
}

// GetConfigmapConfig provides utility config for configmap
@@ -25,8 +27,9 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
		ResourceAnnotations: configmap.Annotations,
		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
		TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
		SHAValue:            GetSHAfromConfigmap(configmap),
		SHAValue:            util.GetSHAfromConfigmap(configmap),
		Type:                constants.ConfigmapEnvVarPostfix,
		Labels:              configmap.Labels,
	}
}

@@ -38,7 +41,8 @@ func GetSecretConfig(secret *v1.Secret) Config {
		ResourceAnnotations: secret.Annotations,
		Annotation:          options.SecretUpdateOnChangeAnnotation,
		TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
		SHAValue:            GetSHAfromSecret(secret.Data),
		SHAValue:            util.GetSHAfromSecret(secret.Data),
		Type:                constants.SecretEnvVarPostfix,
		Labels:              secret.Labels,
	}
}
@@ -1,4 +1,4 @@
package util
package common

import "time"