Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits: 110 commits

| SHA1 |
|---|
| 19a76258d0 |
| aa481d9568 |
| 177d2756a8 |
| 9b2af6f9b7 |
| 7c4899a7eb |
| 54d44858f8 |
| 6304a9e5ab |
| 1e6a6ec2d9 |
| 42cd7e71a2 |
| 1107fee109 |
| 9e33dac9ef |
| 517fd33fb1 |
| 1e46d44c7c |
| 49409dce54 |
| 9039956c32 |
| 82eb8d8b87 |
| 7af0728990 |
| d9b36a56b5 |
| dcf4b0d0f6 |
| b8b7cdb610 |
| d2580930e4 |
| e0150145ec |
| 52ac7e307d |
| e311fe2fff |
| dec3410b7f |
| 1e1f094516 |
| a9566fa672 |
| 51ee0a19bb |
| 19918e6fa8 |
| 95cea97d34 |
| d22a0f25de |
| 2d2c35fcf4 |
| cf600f7761 |
| 01f62cf823 |
| 85bd7a075c |
| 3999edb7a3 |
| f69773c588 |
| 8b257a3f0c |
| 93936d12c1 |
| 2d810f9824 |
| 99c45b3ca3 |
| 81315adc9b |
| ad0407517d |
| cafa14f0e7 |
| 5089955691 |
| 379c428283 |
| 404c3700f7 |
| df812c555c |
| e8cb97a6d1 |
| 58ad781c0c |
| be09beac29 |
| 516f9e8bc5 |
| e69ea41ece |
| c2107cccbb |
| 7ea10b48ec |
| a6abbd278c |
| 70e58394d1 |
| c807e6deaf |
| 52815a4ee1 |
| 4679d21e26 |
| 172de75f01 |
| 4eaaf2da79 |
| 144cc910af |
| 6faffdc0cf |
| c5481c6e7b |
| a47d927422 |
| 70e0598833 |
| 85bea39568 |
| 97e74ad11b |
| 9c77e27b2c |
| ad8e6f78a0 |
| 93ba31821e |
| adbc22c143 |
| b6c6e45b5f |
| 3c03be7b8e |
| 1717fe93b1 |
| 0ff6d0fcf2 |
| f896e1462d |
| 8b0311f799 |
| fafb5f45d6 |
| 22ee53ae33 |
| 61ba818d4d |
| b56f8f5c83 |
| c021462893 |
| 1874f871d9 |
| 4741ff231b |
| b166c2bbfa |
| ab0980d6ee |
| f9ec2e99a8 |
| b06411479f |
| e93731a686 |
| 5139d65f9c |
| 023425d4e1 |
| 8c2f2e574c |
| b6d538dca8 |
| fdfb083b27 |
| 533ba4f7eb |
| 662e1fcd9b |
| 54999116c9 |
| f9561a2167 |
| c2d6b95297 |
| 2312be3d68 |
| 2c82d6507f |
| 96dc40c8cb |
| ee12df2b32 |
| e08b1d3927 |
| aee1366017 |
| 7dc8002029 |
| e041b6d3f9 |
| 7c96cf3f57 |
.github/workflows/pull_request.yaml (vendored): 13 changed lines

````diff
@@ -63,6 +63,11 @@ jobs:
           check-latest: true
           cache: true
 
+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       # Get highest tag and remove any suffixes with '-'
       - name: Get Highest tag
        id: highest_tag
@@ -104,6 +109,7 @@ jobs:
           kind create cluster
           kubectl cluster-info
 
+
       - name: Test
         run: make test
 
@@ -135,7 +141,12 @@ jobs:
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
           push: false
-          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+          build-args: |
+            VERSION=merge-${{ steps.generate_tag.outputs.GIT_TAG }}
+            COMMIT=${{github.event.pull_request.head.sha}}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
````
.github/workflows/pull_request_docs.yaml (vendored): 5 changed lines

````diff
@@ -16,16 +16,17 @@ on:
 
 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.131
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.134
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: docs
       MD_LINT_CONFIG: .markdownlint.yaml
   build:
-    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.131
+    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.134
     with:
       DOCKER_FILE_PATH: Dockerfile-docs
       CONTAINER_REGISTRY_URL: ghcr.io/stakater
       PUSH_IMAGE: false
     secrets:
       CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
       CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
````
.github/workflows/push-helm-chart.yaml (vendored): 5 changed lines

````diff
@@ -11,10 +11,11 @@ on:
     paths:
       - 'deployments/kubernetes/chart/reloader/**'
       - '.github/workflows/push-helm-chart.yaml'
+      - '.github/workflows/release-helm-chart.yaml'
 
 env:
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
-  REGISTRY: ghcr.io
+  REGISTRY: ghcr.io # container registry
 
 jobs:
   verify-and-push-helm-chart:
@@ -72,7 +73,7 @@ jobs:
         exit 1
 
     - name: Install Cosign
-      uses: sigstore/cosign-installer@v3.8.1
+      uses: sigstore/cosign-installer@v3.8.2
 
     - name: Login to GHCR Registry
       uses: docker/login-action@v3
````
.github/workflows/push.yaml (vendored): 10 changed lines

````diff
@@ -91,6 +91,10 @@ jobs:
         with:
           username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
           password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
 
+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       - name: Generate image repository path for Docker registry
         run: |
@@ -148,7 +152,11 @@ jobs:
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
           push: true
-          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+          build-args: |
+            VERSION=merge-${{ github.event.number }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
````
.github/workflows/release-helm-chart.yaml (vendored): 2 changed lines

````diff
@@ -4,8 +4,6 @@ on:
   push:
     tags:
       - "chart-v*"
-    tags-ignore:
-      - "v*"
 
 permissions:
   contents: write
````
.github/workflows/release.yaml (vendored): 14 changed lines

````diff
@@ -4,8 +4,6 @@ on:
   push:
     tags:
       - "v*"
-    tags-ignore:
-      - "chart-v*"
 
 env:
   DOCKER_FILE_PATH: Dockerfile
@@ -81,6 +79,10 @@ jobs:
         id: generate_tag
         run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
 
+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
 
@@ -108,6 +110,10 @@ jobs:
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
             ${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+          build-args: |
+            VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
           labels: |
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -154,6 +160,10 @@ jobs:
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
             ${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }},${{ env.GHCR_IMAGE_REPOSITORY }}:latest
+          build-args: |
+            VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
           labels: |
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
````
````diff
@@ -1,7 +1,7 @@
 StylesPath = styles
 MinAlertLevel = warning
 
-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.60/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.77/Stakater.zip
 Vocab = Stakater
 
 # Only check MarkDown files
````
Dockerfile: 11 changed lines

````diff
@@ -2,13 +2,17 @@ ARG BUILDER_IMAGE
 ARG BASE_IMAGE
 
 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.2} AS builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.4} AS builder
 
 ARG TARGETOS
 ARG TARGETARCH
 ARG GOPROXY
 ARG GOPRIVATE
 
+ARG COMMIT
+ARG VERSION
+ARG BUILD_DATE
+
 WORKDIR /workspace
 
 # Copy the Go Modules manifests
@@ -30,7 +34,10 @@ RUN CGO_ENABLED=0 \
     GOPROXY=${GOPROXY} \
     GOPRIVATE=${GOPRIVATE} \
     GO111MODULE=on \
-    go build -mod=mod -a -o manager main.go
+    go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \
+    -X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \
+    -X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE}" \
+    -installsuffix 'static' -mod=mod -a -o manager ./
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
````
README.md: 53 changed lines

````diff
@@ -2,6 +2,7 @@
   <img src="assets/web/reloader.jpg" alt="Reloader" width="40%"/>
 </p>
 
+[](https://github.com/sponsors/stakater?utm_source=github&utm_medium=readme&utm_campaign=reloader)
 [](https://goreportcard.com/report/github.com/stakater/reloader)
 [](https://godoc.org/github.com/stakater/reloader)
 [](https://github.com/stakater/reloader/releases/latest)
@@ -32,6 +33,7 @@ Reloader bridges that gap by ensuring your workloads stay in sync with configura
 flowchart LR
     ExternalSecret -->|Creates| Secret
     SealedSecret -->|Creates| Secret
+    Certificate -->|Creates| Secret
     Secret -->|Watched by| Reloader
     ConfigMap -->|Watched by| Reloader
@@ -44,7 +46,7 @@ flowchart LR
     Reloader -->|Sends Notification| Slack,Teams,Webhook
 ```
 
-- Sources like `ExternalSecret` or `SealedSecret` create or manage your Kubernetes Secrets.
+- Sources like `ExternalSecret`, `SealedSecret`, or `Certificate` from `cert-manager` can create or manage Kubernetes `Secrets` — but they can also be created manually or delivered through GitOps workflows.
 - `Secrets` and `ConfigMaps` are watched by Reloader.
 - When changes are detected, Reloader automatically triggers a rollout of the associated workloads, ensuring your app always runs with the latest configuration.
 
@@ -152,6 +154,21 @@ This pattern allows fine-grained reload control — workloads only restart if th
 1. ✅ You want to reload a workload only if it references a ConfigMap or Secret that has been explicitly tagged with `reloader.stakater.com/match: "true"`.
 1. ✅ Use this when you want full control over which shared or system-wide resources trigger reloads. Great in multi-tenant clusters or shared configs.
````
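Pulling the two halves of this pattern together, here is a minimal sketch; it assumes the documented `reloader.stakater.com/search` annotation on the workload as the counterpart of `match` on the resource, and all names and the image are illustrative:

```yaml
# Only resources tagged with match "true" may trigger this workload
apiVersion: v1
kind: ConfigMap
metadata:
  name: shared-config                      # illustrative name
  annotations:
    reloader.stakater.com/match: "true"    # opt this resource in
data:
  LOG_LEVEL: info
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app                             # illustrative name
  annotations:
    reloader.stakater.com/search: "true"   # reload only on matched resources
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: app
          image: nginx:1.27                # illustrative image
          envFrom:
            - configMapRef:
                name: shared-config        # the watched reference
```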
````diff
+### ⛔ Resource-Level Ignore Annotation
+
+When you need to prevent specific ConfigMaps or Secrets from triggering any reloads, use the ignore annotation on the resource itself:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap # or Secret
+metadata:
+  name: my-config
+  annotations:
+    reloader.stakater.com/ignore: "true"
+```
+
+This instructs Reloader to skip all reload logic for that resource across all workloads.
+
 ### 4. ⚙️ Workload-Specific Rollout Strategy
 
 By default, Reloader uses the **rollout** strategy — it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift.
@@ -188,7 +205,7 @@ metadata:
 
 Reloader can optionally **send alerts** whenever it triggers a rolling upgrade for a workload (e.g., `Deployment`, `StatefulSet`, etc.).
 
-These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack or Microsoft Teams.
+These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack, Microsoft Teams or Google Chat.
 
 To enable this feature, update the `reloader.env.secret` section in your `values.yaml` (when installing via Helm):
 
@@ -197,11 +214,30 @@ reloader:
   env:
     secret:
       ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
-      ALERT_SINK: "slack" # Options: slack, teams, webhook (default: webhook)
+      ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
       ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
       ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
 ```
 
+### 7. ⏸️ Pause Deployments
+
+This feature allows you to pause rollouts for a deployment for a specified duration, helping to prevent multiple restarts when several ConfigMaps or Secrets are updated in quick succession.
+
+| Annotation | Applies To | Description |
+|---|---|---|
+| `deployment.reloader.stakater.com/pause-period: "5m"` | Deployment | Pauses reloads for the specified period (e.g., `5m`, `1h`) |
+
+#### How it works
+
+1. Add the `deployment.reloader.stakater.com/pause-period` annotation to your Deployment, specifying the pause duration (e.g., `"5m"` for five minutes).
+1. When a watched ConfigMap or Secret changes, Reloader will still trigger a reload event, but if the deployment is paused, the rollout will have no effect until the pause period has elapsed.
+1. This avoids repeated restarts if multiple resources are updated close together.
+
+#### Use when
+
+1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time.
+1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes.
+
````
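A sketch of a Deployment combining the pause annotation above with the standard `reloader.stakater.com/auto` opt-in; the name, image, ConfigMap, and duration are illustrative:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app                                            # illustrative name
  annotations:
    reloader.stakater.com/auto: "true"                    # reload when referenced resources change
    deployment.reloader.stakater.com/pause-period: "5m"   # absorb bursts of config updates
spec:
  replicas: 2
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: app
          image: nginx:1.27                               # illustrative image
          envFrom:
            - configMapRef:
                name: app-config                          # illustrative ConfigMap
```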
````diff
 ## 🚀 Installation
 
 ### 1. 📦 Helm
@@ -304,7 +340,7 @@ Reloader supports multiple strategies for triggering rolling updates when a watc
 
 | Flag | Description |
 |------|-------------|
-| `--namespace-selector=key=value` | Watch only namespaces with matching labels |
+| `--namespace-selector='key=value'` <br /> <br />`--namespace-selector='key1=value1,key2=value2'` <br /> <br />`--namespace-selector='key in (value1,value2)'` | Watch only namespaces with matching labels. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label selectors |
 | `--namespaces-to-ignore=ns1,ns2` | Skip specific namespaces from being watched |
 
 #### 4. 📝 Annotation Key Overrides
@@ -320,6 +356,15 @@ These flags allow you to redefine annotation keys used in your workloads or reso
 | `--search-match-annotation` | Overrides `reloader.stakater.com/match` |
 | `--secret-annotation` | Overrides `secret.reloader.stakater.com/reload` |
 | `--configmap-annotation` | Overrides `configmap.reloader.stakater.com/reload` |
+| `--pause-deployment-annotation` | Overrides `deployment.reloader.stakater.com/pause-period` |
+| `--pause-deployment-time-annotation` | Overrides `deployment.reloader.stakater.com/paused-at` |
+
+### 5. 🕷️ Debugging
+
+| Flag | Description |
+|--- |-------------|
+| `--enable-pprof` | Enables `pprof` for profiling |
+| `--pprof-addr` | Address to start `pprof` server on. Default is `:6060` |
 
 ## Compatibility
````
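For Helm users, the chart wires these flags through `reloader.custom_annotations` (visible in the deployment template diff further down this page); a hedged `values.yaml` sketch, where every annotation key is illustrative:

```yaml
reloader:
  custom_annotations:
    configmap: "mycompany.com/configmap-reload"   # passed as --configmap-annotation
    match: "mycompany.com/match"                  # passed as --search-match-annotation
    pausePeriod: "mycompany.com/pause-period"     # passed as --pause-deployment-annotation
    pauseTime: "mycompany.com/paused-at"          # pause timestamp annotation key
```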
````diff
@@ -1,8 +1,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 2.1.0
-appVersion: v1.4.0
+version: 2.2.1
+appVersion: v1.4.6
 keywords:
   - Reloader
   - kubernetes
````
````diff
@@ -52,11 +52,13 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid value are either `true` or `false` | boolean | `false` |
 | `reloader.reloadStrategy` | Strategy to trigger resource restart, set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
 | `reloader.ignoreNamespaces` | List of comma separated namespaces to ignore, if multiple are provided, they are combined with the AND operator | string | `""` |
-| `reloader.namespaceSelector` | List of comma separated namespaces to select, if multiple are provided, they are combined with the AND operator | string | `""` |
+| `reloader.namespaceSelector` | List of comma separated k8s label selectors for namespaces selection. The parameter only used when `reloader.watchGlobally` is `true`. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label-selector | string | `""` |
 | `reloader.resourceLabelSelector` | List of comma separated label selectors, if multiple are provided they are combined with the AND operator | string | `""` |
 | `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
 | `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
 | `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
+| `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
+| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
 | `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
 | `reloader.legacy.rbac` | | boolean | `false` |
 | `reloader.matchLabels` | Pod labels to match | map | `{}` |
@@ -82,7 +84,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
 | `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
 | `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
 | `reloader.deployment.volumeMounts` | Mount volume | array | `[]` |
 | `reloader.deployment.volumes` | Add volume to a pod | array | `[]` |
+| `reloader.deployment.dnsConfig` | dns configuration for pods | map | `{}` |
 
 ### Other Reloader Parameters
 
 | Parameter | Description | Type | Default |
@@ -125,25 +130,25 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 
 #### 🔄 `reloadOnCreate` Behavior
 **When true:**
-✅ New ConfigMaps/Secrets trigger rolling updates
-✅ New deployments referencing existing resources reload
-✅ In HA mode, new leader reloads all tracked workloads
+✅ New ConfigMaps/Secrets trigger rolling updates
+✅ New deployments referencing existing resources reload
+✅ In HA mode, new leader reloads all tracked workloads
 
 **When false:**
-❌ Updates during leader downtime are missed
-⏳ Potential 15s delay window (default `LeaseDuration`)
+❌ Updates during leader downtime are missed
+⏳ Potential 15s delay window (default `LeaseDuration`)
 
 #### 🗑️ `reloadOnDelete` Behavior
 **When true:**
-✅ Deleted resources trigger rolling updates of referencing workloads
+✅ Deleted resources trigger rolling updates of referencing workloads
 
 **When false:**
-❌ Deletions have no effect on referencing pods
+❌ Deletions have no effect on referencing pods
 
 #### Default Settings
 ⚠️ All flags default to `false` (must be enabled explicitly):
 - `reloadOnCreate`
-- `reloadOnDelete`
+- `reloadOnDelete`
 - `syncAfterRestart`
 
 ### Deprecation Notice
````
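A minimal `values.yaml` sketch enabling the three flags discussed above:

```yaml
reloader:
  reloadOnCreate: true     # reload when referenced resources are created
  reloadOnDelete: true     # reload when referenced resources are deleted
  syncAfterRestart: true   # replay Add events after a restart; needs reloadOnCreate
```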
````diff
@@ -27,14 +27,20 @@ Create chart name and version as used by the chart label.
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
 {{- end }}
 
+{{- define "reloader-match-labels.chart" -}}
+app: {{ template "reloader-fullname" . }}
+release: {{ .Release.Name | quote }}
+{{- end -}}
+
 {{- define "reloader-labels.chart" -}}
-app.kubernetes.io/name: {{ include "reloader-name" . }}
-helm.sh/chart: {{ include "reloader-chart" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end}}
+{{ include "reloader-match-labels.chart" . }}
+app.kubernetes.io/name: {{ template "reloader-name" . }}
+app.kubernetes.io/instance: {{ .Release.Name | quote }}
+helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+heritage: {{ .Release.Service | quote }}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end -}}
 
 {{/*
@@ -47,10 +53,10 @@ podAntiAffinity:
       podAffinityTerm:
         labelSelector:
           matchExpressions:
-            - key: app
+            - key: app.kubernetes.io/instance
               operator: In
               values:
-                - {{ template "reloader-fullname" . }}
+                - {{ .Release.Name | quote }}
         topologyKey: "kubernetes.io/hostname"
 {{- end -}}
 
@@ -72,3 +78,12 @@ Create the annotations to support helm3
 meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
 meta.helm.sh/release-name: {{ .Release.Name | quote }}
 {{- end -}}
+
+{{/*
+Create the namespace selector if it does not watch globally
+*/}}
+{{- define "reloader-namespaceSelector" -}}
+{{- if and .Values.reloader.watchGlobally .Values.reloader.namespaceSelector -}}
+{{ .Values.reloader.namespaceSelector }}
+{{- end -}}
+{{- end -}}
````
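In effect, the new `reloader-namespaceSelector` helper only emits a selector when both `watchGlobally` and `namespaceSelector` are set; a sketch of the values combination it guards, with an illustrative selector value:

```yaml
reloader:
  watchGlobally: true                                         # selector is dropped when false
  namespaceSelector: "environment in (staging,production)"    # illustrative label selector
```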
````diff
@@ -11,10 +11,10 @@ metadata:
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
 rules:
@@ -31,7 +31,7 @@ rules:
     - list
     - get
     - watch
-{{- if .Values.reloader.namespaceSelector }}
+{{- if (include "reloader-namespaceSelector" .) }}
   - apiGroups:
       - ""
     resources:
````
````diff
@@ -11,10 +11,10 @@ metadata:
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
 roleRef:
````
````diff
@@ -4,15 +4,15 @@ metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
 {{- if .Values.reloader.deployment.annotations }}
-{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.annotations) . | indent 4 }}
 {{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -25,24 +25,23 @@ spec:
   revisionHistoryLimit: {{ .Values.reloader.deployment.revisionHistoryLimit }}
   selector:
     matchLabels:
-      app.kubernetes.io/name: {{ template "reloader-fullname" . }}
-      app.kubernetes.io/instance: {{ .Release.Name | quote }}
+{{ include "reloader-match-labels.chart" . | indent 6 }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 6 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
 {{- end }}
   template:
     metadata:
 {{- if .Values.reloader.deployment.pod.annotations }}
       annotations:
-{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
+{{ tpl (toYaml .Values.reloader.deployment.pod.annotations) . | indent 8 }}
 {{- end }}
       labels:
 {{ include "reloader-labels.chart" . | indent 8 }}
 {{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 8 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 8 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 8 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 8 }}
 {{- end }}
     spec:
 {{- with .Values.global.imagePullSecrets }}
@@ -72,6 +71,10 @@ spec:
 {{- if .Values.reloader.deployment.priorityClassName }}
       priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
 {{- end }}
+      {{- with .Values.reloader.deployment.dnsConfig }}
+      dnsConfig:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       containers:
 {{- if .Values.global.imageRegistry }}
         - image: "{{ .Values.global.imageRegistry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
@@ -144,6 +147,15 @@ spec:
               fieldRef:
                 fieldPath: metadata.namespace
 {{- end }}
+
+          - name: RELOADER_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+
+          - name: RELOADER_DEPLOYMENT_NAME
+            value: {{ template "reloader-fullname" . }}
+
 {{- if .Values.reloader.enableHA }}
           - name: POD_NAME
             valueFrom:
@@ -198,7 +210,7 @@ spec:
           {{- . | toYaml | nindent 10 }}
           {{- end }}
 {{- end }}
-{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
+{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
           args:
 {{- if .Values.reloader.logFormat }}
             - "--log-format={{ .Values.reloader.logFormat }}"
@@ -215,12 +227,18 @@ spec:
 {{- if .Values.reloader.ignoreNamespaces }}
             - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
 {{- end }}
-{{- if .Values.reloader.namespaceSelector }}
-            - "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
+{{- if (include "reloader-namespaceSelector" .) }}
+            - "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\""
 {{- end }}
 {{- if .Values.reloader.resourceLabelSelector }}
             - "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
 {{- end }}
+{{- if .Values.reloader.enablePProf }}
+            - "--enable-pprof"
+{{- if and .Values.reloader.pprofAddr }}
+            - "--pprof-addr={{ .Values.reloader.pprofAddr }}"
+{{- end }}
+{{- end }}
 {{- if .Values.reloader.custom_annotations }}
 {{- if .Values.reloader.custom_annotations.configmap }}
             - "--configmap-annotation"
@@ -249,6 +267,14 @@ spec:
 {{- if .Values.reloader.custom_annotations.match }}
             - "--search-match-annotation"
             - "{{ .Values.reloader.custom_annotations.match }}"
 {{- end }}
+{{- if .Values.reloader.custom_annotations.pausePeriod }}
+            - "--pause-deployment-annotation"
+            - "{{ .Values.reloader.custom_annotations.pausePeriod }}"
+{{- end }}
+{{- if .Values.reloader.custom_annotations.pauseTime }}
+            - "--pause-deployment-annotation"
+            - "{{ .Values.reloader.custom_annotations.pauseTime }}"
+{{- end }}
 {{- if .Values.reloader.webhookUrl }}
             - "--webhook-url"
````
````diff
@@ -7,17 +7,16 @@ metadata:
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
   podSelector:
     matchLabels:
-      app.kubernetes.io/name: {{ template "reloader-fullname" . }}
-      app.kubernetes.io/instance: {{ .Release.Name | quote }}
+{{ include "reloader-match-labels.chart" . | indent 6 }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 6 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
 {{- end }}
   policyTypes:
     - Ingress
````
````diff
@@ -13,5 +13,5 @@ spec:
 {{- end }}
   selector:
     matchLabels:
-      app.kubernetes.io/name: {{ template "reloader-fullname" . }}
+{{ include "reloader-match-labels.chart" . | nindent 6 }}
 {{- end }}
````
````diff
@@ -56,5 +56,5 @@ spec:
       - {{ .Release.Namespace }}
   selector:
     matchLabels:
-{{ include "reloader-labels.chart" . | nindent 6 }}
+{{ include "reloader-match-labels.chart" . | nindent 6 }}
 {{- end }}
````
````diff
@@ -11,10 +11,10 @@ metadata:
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
   namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -101,3 +101,34 @@ rules:
       - create
       - patch
 {{- end }}
+
+---
+
+{{- if .Values.reloader.rbac.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.rbac.labels }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
+{{- end }}
+{{- if .Values.reloader.matchLabels }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}-metadata-role
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - list
+      - get
+      - watch
+      - create
+      - update
+{{- end }}
````
````diff
@@ -11,10 +11,10 @@ metadata:
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
-{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
   namespace: {{ .Values.namespace | default .Release.Namespace }}
@@ -27,3 +27,30 @@ subjects:
     name: {{ template "reloader-serviceAccountName" . }}
     namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
+
+---
+{{- if .Values.reloader.rbac.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.rbac.labels }}
+{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
+{{- end }}
+{{- if .Values.reloader.matchLabels }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}-metadata-role-binding
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ template "reloader-fullname" . }}-metadata-role
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "reloader-serviceAccountName" . }}
+    namespace: {{ .Values.namespace | default .Release.Namespace }}
+{{- end }}
````
````diff
@@ -5,22 +5,22 @@ metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
 {{- if .Values.reloader.service.annotations }}
-{{ toYaml .Values.reloader.service.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.service.annotations) . | indent 4 }}
 {{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.service.labels }}
-{{ toYaml .Values.reloader.service.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.service.labels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
   selector:
 {{- if .Values.reloader.deployment.labels }}
-{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.deployment.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   ports:
     - port: {{ .Values.reloader.service.port }}
````
````diff
@@ -11,15 +11,15 @@ metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
 {{- if .Values.reloader.serviceAccount.annotations }}
-{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
+{{ tpl (toYaml .Values.reloader.serviceAccount.annotations) . | indent 4 }}
 {{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.serviceAccount.labels }}
-{{ toYaml .Values.reloader.serviceAccount.labels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.serviceAccount.labels) . | indent 4 }}
 {{- end }}
 {{- if .Values.reloader.matchLabels }}
-{{ toYaml .Values.reloader.matchLabels | indent 4 }}
+{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
 {{- end }}
   name: {{ template "reloader-serviceAccountName" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
````
````diff
@@ -56,5 +56,5 @@ spec:
       - {{ .Release.Namespace }}
   selector:
     matchLabels:
-{{ include "reloader-labels.chart" . | nindent 6 }}
+{{ include "reloader-match-labels.chart" . | nindent 6 }}
 {{- end }}
````
````diff
@@ -17,7 +17,7 @@ fullnameOverride: ""
 image:
   name: stakater/reloader
   repository: ghcr.io/stakater/reloader
-  tag: v1.4.0
+  tag: v1.4.6
   # digest: sha256:1234567
   pullPolicy: IfNotPresent
@@ -41,6 +41,10 @@ reloader:
   watchGlobally: true
   # Set to true to enable leadership election allowing you to run multiple replicas
   enableHA: false
+  # Set to true to enable pprof for profiling
+  enablePProf: false
+  # Address to start pprof server on. Default is ":6060"
+  pprofAddr: ":6060"
   # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
   readOnlyRootFileSystem: false
   legacy:
@@ -49,6 +53,19 @@ reloader:
   # Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
   enableMetricsByNamespace: false
   deployment:
+    # Specifies the deployment DNS configuration.
+    dnsConfig: {}
+    #   nameservers:
+    #     - 1.2.3.4
+    #   searches:
+    #     - ns1.svc.cluster-domain.example
+    #     - my.dns.search.suffix
+    #   options:
+    #     - name: ndots
+    #       value: "1"
+    #     - name: attempts
+    #       value: "3"
+
     # If you wish to run multiple replicas set reloader.enableHA = true
     replicas: 1
@@ -67,6 +84,9 @@ reloader:
     #     - key: "node-role.kubernetes.io/infra-worker"
     #       operator: "Exists"
     affinity: {}
+
+    volumeMounts: []
+    volumes: []
 
     securityContext:
       runAsNonRoot: true
@@ -99,14 +119,14 @@ reloader:
     #     whenUnsatisfiable: DoNotSchedule
     #     labelSelector:
     #       matchLabels:
-    #         app: my-app
+    #         app.kubernetes.io/instance: my-app
     topologySpreadConstraints: []
 
     annotations: {}
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v1.4.0
+      version: v1.4.6
     # Support for extra environment variables.
     env:
       # Open supports Key value pair as environment variables.
@@ -337,8 +357,4 @@ reloader:
   #   are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
   #   updateMode: Auto
 
-  volumeMounts: []
-
-  volumes: []
-
   webhookUrl: ""
````
````diff
@@ -6,3 +6,4 @@ resources:
   - manifests/clusterrolebinding.yaml
   - manifests/serviceaccount.yaml
   - manifests/deployment.yaml
+  - manifests/role.yaml
````
````diff
@@ -31,6 +31,13 @@ spec:
               resourceFieldRef:
                 resource: limits.memory
                 divisor: '1'
+          - name: RELOADER_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+
+          - name: RELOADER_DEPLOYMENT_NAME
+            value: reloader-reloader
         ports:
           - name: http
             containerPort: 9090
````
deployments/kubernetes/manifests/role.yaml (new file): 32 lines

````diff
@@ -0,0 +1,32 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: reloader-reloader-metadata-role
+  namespace: default
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - list
+      - get
+      - watch
+      - create
+      - update
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: reloader-reloader-metadata-rolebinding
+  namespace: default
+subjects:
+  - kind: ServiceAccount
+    name: reloader-reloader
+    namespace: default
+roleRef:
+  kind: Role
+  name: reloader-reloader-metadata-role
+  apiGroup: rbac.authorization.k8s.io
````
````diff
@@ -5,6 +5,23 @@ metadata:
   namespace: default
 ---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: reloader-reloader-metadata-role
+  namespace: default
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - list
+      - get
+      - watch
+      - create
+      - update
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: reloader-reloader-role
@@ -64,6 +81,20 @@ rules:
     - patch
 ---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: reloader-reloader-metadata-rolebinding
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: reloader-reloader-metadata-role
+subjects:
+  - kind: ServiceAccount
+    name: reloader-reloader
+    namespace: default
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: reloader-reloader-role-binding
@@ -104,7 +135,13 @@ spec:
             resourceFieldRef:
               divisor: "1"
               resource: limits.memory
-        image: "ghcr.io/stakater/reloader:latest"
+        - name: RELOADER_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: RELOADER_DEPLOYMENT_NAME
+          value: reloader-reloader
+        image: ghcr.io/stakater/reloader:latest
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 5
````
````diff
@@ -8,7 +8,7 @@ In-order to enable this feature, you need to update the `reloader.env.secret` se
 
 ```yaml
 ALERT_ON_RELOAD: [ true/false ] Default: false
-ALERT_SINK: [ slack/teams/webhook ] Default: webhook
+ALERT_SINK: [ slack/teams/gchat/webhook ] Default: webhook
 ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
 ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
 ```
````
````diff
@@ -10,3 +10,17 @@ These are the key features of Reloader:
 1. Restart pod in a `rollout` on change in linked/related `ConfigMaps` or `Secrets`
 
 This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md).
+
+---
+
+<div align="center">
+
+[](https://github.com/sponsors/stakater?utm_source=docs&utm_medium=footer&utm_campaign=reloader)
+
+<p>
+    Your support funds maintenance, security updates, and new features for Reloader, plus continued investment in other open source tools.
+</p>
+
+</div>
+
+---
````
go.mod: 3 changed lines

````diff
@@ -1,6 +1,6 @@
 module github.com/stakater/Reloader
 
-go 1.24.2
+go 1.24.4
 
 require (
 	github.com/argoproj/argo-rollouts v1.8.2
@@ -38,7 +38,6 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
````
go.sum: 18 changed lines

````diff
@@ -76,12 +76,8 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
 github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/openshift/api v0.0.0-20250331192611-6179881b782d h1:Vadr+xFmNi6RzWRTJtqMQJv1hiUe7as1rV2svKQffoY=
-github.com/openshift/api v0.0.0-20250331192611-6179881b782d/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I=
+github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
-github.com/openshift/client-go v0.0.0-20250330132942-bc2e3c2af6e1 h1:9SaT0p5FsRDvz4STV1VnxMyfXXzAXv1PubZ0nczzDYk=
-github.com/openshift/client-go v0.0.0-20250330132942-bc2e3c2af6e1/go.mod h1:6a0Hj32FrkokKMeTck1uStmNV0wHYv46dHWAWER5iis=
+github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ=
+github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo=
 github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI=
@@ -91,12 +87,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
-github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
@@ -137,12 +129,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
-golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
-golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -152,18 +140,12 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
````
````diff
@@ -35,6 +35,8 @@ func SendWebhookAlert(msg string) {
 		sendSlackAlert(webhook_url, webhook_proxy, msg)
 	} else if alert_sink == "teams" {
 		sendTeamsAlert(webhook_url, webhook_proxy, msg)
+	} else if alert_sink == "gchat" {
+		sendGoogleChatAlert(webhook_url, webhook_proxy, msg)
 	} else {
 		msg = strings.Replace(msg, "*", "", -1)
 		sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
@@ -98,6 +100,29 @@ func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error {
 	return nil
 }
 
+// function to send alert to Google Chat webhook
+func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error {
+	payload := map[string]interface{}{
+		"text": msg,
+	}
+
+	request := gorequest.New().Proxy(proxy)
+	resp, _, err := request.
+		Post(webhookUrl).
+		RedirectPolicy(redirectPolicy).
+		Send(payload).
+		End()
+
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 200 {
+		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
+	}
+
+	return nil
+}
+
 // function to send alert to webhook service as text
 func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
 	request := gorequest.New().Proxy(proxy)
````
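To exercise the new Google Chat sink, the alerting settings from the README apply unchanged apart from the sink name; a hedged `values.yaml` sketch, with an illustrative webhook URL:

```yaml
reloader:
  env:
    secret:
      ALERT_ON_RELOAD: "true"
      ALERT_SINK: "gchat"                                              # new sink added in this diff
      ALERT_WEBHOOK_URL: "https://chat.googleapis.com/v1/spaces/..."   # illustrative URL
      ALERT_ADDITIONAL_INFO: "Triggered by Reloader"
```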
@@ -2,6 +2,7 @@ package callbacks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -15,10 +16,14 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
patchtypes "k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"maps"
|
||||
|
||||
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
openshiftv1 "github.com/openshift/api/apps/v1"
|
||||
)
|
||||
|
||||
// ItemFunc is a generic function to return a specific resource in given namespace
|
||||
type ItemFunc func(kube.Clients, string, string) (runtime.Object, error)
|
||||
|
||||
// ItemsFunc is a generic function to return a specific resource array in given namespace
|
||||
type ItemsFunc func(kube.Clients, string) []runtime.Object
|
||||
|
||||
@@ -34,6 +39,12 @@ type VolumesFunc func(runtime.Object) []v1.Volume
|
||||
// UpdateFunc performs the resource update
|
||||
type UpdateFunc func(kube.Clients, string, runtime.Object) error
|
||||
|
||||
// PatchFunc performs the resource patch
|
||||
type PatchFunc func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
|
||||
|
||||
// PatchTemplateFunc is a generic func to return strategic merge JSON patch template
|
||||
type PatchTemplatesFunc func() PatchTemplates
|
||||
|
||||
// AnnotationsFunc is a generic func to return annotations
|
||||
type AnnotationsFunc func(runtime.Object) map[string]string
|
||||
|
||||
@@ -42,14 +53,42 @@ type PodAnnotationsFunc func(runtime.Object) map[string]string
|
||||
|
||||
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
|
||||
type RollingUpgradeFuncs struct {
|
||||
ItemsFunc ItemsFunc
|
||||
AnnotationsFunc AnnotationsFunc
|
||||
PodAnnotationsFunc PodAnnotationsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
InitContainersFunc InitContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
VolumesFunc VolumesFunc
|
||||
ResourceType string
|
||||
ItemFunc ItemFunc
|
||||
ItemsFunc ItemsFunc
|
||||
AnnotationsFunc AnnotationsFunc
|
||||
PodAnnotationsFunc PodAnnotationsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
ContainerPatchPathFunc ContainersFunc
|
||||
InitContainersFunc InitContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
PatchFunc PatchFunc
|
||||
PatchTemplatesFunc PatchTemplatesFunc
|
||||
VolumesFunc VolumesFunc
|
||||
ResourceType string
|
||||
SupportsPatch bool
|
||||
}
|
||||
|
||||
// PatchTemplates contains merge JSON patch templates
type PatchTemplates struct {
    AnnotationTemplate   string
    EnvVarTemplate       string
    DeleteEnvVarTemplate string
}

// GetDeploymentItem returns the deployment in given namespace
func GetDeploymentItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    deployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to get deployment %v", err)
        return nil, err
    }

    if deployment.Spec.Template.ObjectMeta.Annotations == nil {
        annotations := make(map[string]string)
        deployment.Spec.Template.ObjectMeta.Annotations = annotations
    }

    return deployment, nil
}

// GetDeploymentItems returns the deployments in given namespace
@@ -72,6 +111,17 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
    return items
}

// GetCronJobItem returns the cronjob in given namespace
func GetCronJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    cronjob, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to get cronjob %v", err)
        return nil, err
    }

    return cronjob, nil
}

// GetCronJobItems returns the cronjobs in given namespace
func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
    cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -92,6 +142,17 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
    return items
}

// GetJobItem returns the job in given namespace
func GetJobItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    job, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to get job %v", err)
        return nil, err
    }

    return job, nil
}

// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
    jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -112,6 +173,17 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
    return items
}

// GetDaemonSetItem returns the daemonSet in given namespace
func GetDaemonSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    daemonSet, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to get daemonSet %v", err)
        return nil, err
    }

    return daemonSet, nil
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
    daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -131,6 +203,17 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
    return items
}

// GetStatefulSetItem returns the statefulSet in given namespace
func GetStatefulSetItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    statefulSet, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to get statefulSet %v", err)
        return nil, err
    }

    return statefulSet, nil
}

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
    statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -150,23 +233,15 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
    return items
}

// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
    deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
// GetRolloutItem returns the rollout in given namespace
func GetRolloutItem(clients kube.Clients, name string, namespace string) (runtime.Object, error) {
    rollout, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), name, meta_v1.GetOptions{})
    if err != nil {
        logrus.Errorf("Failed to list deploymentConfigs %v", err)
        logrus.Errorf("Failed to get Rollout %v", err)
        return nil, err
    }

    items := make([]runtime.Object, len(deploymentConfigs.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range deploymentConfigs.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        items[i] = &deploymentConfigs.Items[i]
    }

    return items
    return rollout, nil
}

// GetRolloutItems returns the rollouts in given namespace
@@ -190,71 +265,97 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {

// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.Deployment).ObjectMeta.Annotations == nil {
        item.(*appsv1.Deployment).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.Deployment).ObjectMeta.Annotations
}

// GetCronJobAnnotations returns the annotations of given cronjob
func GetCronJobAnnotations(item runtime.Object) map[string]string {
    if item.(*batchv1.CronJob).ObjectMeta.Annotations == nil {
        item.(*batchv1.CronJob).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*batchv1.CronJob).ObjectMeta.Annotations
}

// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
    if item.(*batchv1.Job).ObjectMeta.Annotations == nil {
        item.(*batchv1.Job).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*batchv1.Job).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.DaemonSet).ObjectMeta.Annotations == nil {
        item.(*appsv1.DaemonSet).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}

// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.StatefulSet).ObjectMeta.Annotations == nil {
        item.(*appsv1.StatefulSet).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}

// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
    return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}

// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item runtime.Object) map[string]string {
    if item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations == nil {
        item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}

// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations == nil {
        item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}

// GetCronJobPodAnnotations returns the pod's annotations of given cronjob
func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
    if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
        item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}

// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
    if item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations == nil {
        item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations == nil {
        item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}

// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
    if item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations == nil {
        item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}

// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
    if item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations == nil {
        item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
    }
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}

@@ -283,11 +384,6 @@ func GetStatefulSetContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}

// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}

// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item runtime.Object) []v1.Container {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
@@ -318,16 +414,20 @@ func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}

// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}

// GetRolloutInitContainers returns the init containers of given rollout
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}

// GetPatchTemplates returns patch templates
func GetPatchTemplates() PatchTemplates {
    return PatchTemplates{
        AnnotationTemplate:   `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, // strategic merge patch
        EnvVarTemplate:       `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, // strategic merge patch
        DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, // JSON patch
    }
}
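Reviewer note: to see how the templates and PatchFunc fit together, here is a minimal, hypothetical call site. The fmt.Sprintf wiring, the namespace, the annotation key, and the clients/deployment variables are assumptions for illustration; the template strings, the patch types, and PatchDeployment itself come from this change. Note also that PatchDeployment below stamps changes with FieldManager "Reloader", so patched fields are attributed to Reloader in the object's managedFields:

    templates := callbacks.GetPatchTemplates()

    // Strategic merge patch: set one pod-template annotation (two %s verbs).
    body := fmt.Sprintf(templates.AnnotationTemplate, "example.com/reloaded-at", time.Now().Format(time.RFC3339))
    _ = callbacks.PatchDeployment(clients, "default", deployment, patchtypes.StrategicMergePatchType, []byte(body))

    // DeleteEnvVarTemplate is an RFC 6902 JSON patch addressing the env var by
    // container index and env index (two %d verbs), so it must be sent with
    // patchtypes.JSONPatchType rather than as a strategic merge patch.
    remove := fmt.Sprintf(templates.DeleteEnvVarTemplate, 0, 2)
    _ = callbacks.PatchDeployment(clients, "default", deployment, patchtypes.JSONPatchType, []byte(remove))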
// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
    deployment := resource.(*appsv1.Deployment)
@@ -335,18 +435,39 @@ func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.O
    return err
}

// PatchDeployment patches the given deployment
func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    deployment := resource.(*appsv1.Deployment)
    _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Patch(context.TODO(), deployment.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
    return err
}

// CreateJobFromCronjob creates a one-off Job from the given cronjob's job template
func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error {
    cronJob := resource.(*batchv1.CronJob)

    annotations := make(map[string]string)
    annotations["cronjob.kubernetes.io/instantiate"] = "manual"
    maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations)

    job := &batchv1.Job{
        ObjectMeta: cronJob.Spec.JobTemplate.ObjectMeta,
        Spec: cronJob.Spec.JobTemplate.Spec,
        ObjectMeta: meta_v1.ObjectMeta{
            GenerateName:    cronJob.Name + "-",
            Namespace:       cronJob.Namespace,
            Annotations:     annotations,
            Labels:          cronJob.Spec.JobTemplate.Labels,
            OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))},
        },
        Spec: cronJob.Spec.JobTemplate.Spec,
    }
    job.GenerateName = cronJob.Name + "-"
    _, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
    return err
}
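Reviewer note: a detail worth calling out in the new ObjectMeta above: meta_v1.NewControllerRef builds an OwnerReference with both Controller and BlockOwnerDeletion set to true, so a manually instantiated Job is listed under its CronJob and is garbage-collected when the CronJob is deleted. A sketch of what the reference carries (the values shown are what apimachinery fills in):

    ref := meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))
    // ref.APIVersion == "batch/v1", ref.Kind == "CronJob"
    // ref.Name and ref.UID are copied from the CronJob object
    // *ref.Controller == true, *ref.BlockOwnerDeletion == true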
func PatchCronJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    return errors.New("not supported patching: CronJob")
}

// ReCreateJobFromjob performs rolling upgrade on job
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
    oldJob := resource.(*batchv1.Job)
@@ -379,6 +500,10 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
    return err
}

func PatchJob(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    return errors.New("not supported patching: Job")
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
    daemonSet := resource.(*appsv1.DaemonSet)
@@ -386,6 +511,12 @@ func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Ob
    return err
}

func PatchDaemonSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    daemonSet := resource.(*appsv1.DaemonSet)
    _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Patch(context.TODO(), daemonSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
    return err
}

// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
    statefulSet := resource.(*appsv1.StatefulSet)
@@ -393,18 +524,17 @@ func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.
    return err
}

// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
    deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
    _, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func PatchStatefulSet(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    statefulSet := resource.(*appsv1.StatefulSet)
    _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Patch(context.TODO(), statefulSet.Name, patchType, bytes, meta_v1.PatchOptions{FieldManager: "Reloader"})
    return err
}

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
    var err error
    rollout := resource.(*argorolloutv1alpha1.Rollout)
    strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
    var err error
    switch options.ToArgoRolloutStrategy(strategy) {
    case options.RestartStrategy:
        _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
@@ -414,6 +544,10 @@ func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Obje
    return err
}
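Reviewer note: for the restart strategy, the callback does not resubmit the whole Rollout; it sends a JSON merge patch that only sets spec.restartAt, which Argo Rollouts interprets as a request to restart the workload's pods (that is the field's stated purpose; the controller-side behavior is outside this diff). A small sketch of the patch body the fmt.Sprintf above produces, assuming fmt and time are imported:

    patch := fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))
    fmt.Println(patch) // e.g. {"spec": {"restartAt": "2026-02-14T18:09:50Z"}}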
func PatchRollout(clients kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
    return errors.New("not supported patching: Rollout")
}

// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
@@ -439,11 +573,6 @@ func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}

// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes

@@ -3,6 +3,7 @@ package callbacks_test
import (
    "context"
    "fmt"
    "strings"
    "testing"
    "time"

@@ -10,7 +11,7 @@ import (
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    watch "k8s.io/apimachinery/pkg/watch"
@@ -18,6 +19,7 @@ import (

    argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
    fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
    patchtypes "k8s.io/apimachinery/pkg/types"

    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/options"
@@ -93,7 +95,7 @@ func TestUpdateRollout(t *testing.T) {
        t.Errorf("updating rollout: %v", err)
    }
    rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
        namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
        namespace).Get(context.TODO(), rollout.Name, metav1.GetOptions{})

    if err != nil {
        t.Errorf("getting rollout: %v", err)
@@ -111,6 +113,71 @@ func TestUpdateRollout(t *testing.T) {
    }
}

func TestPatchRollout(t *testing.T) {
    namespace := "test-ns"
    rollout := testutil.GetRollout(namespace, "test", map[string]string{options.RolloutStrategyAnnotation: ""})
    err := callbacks.PatchRollout(clients, namespace, rollout, patchtypes.StrategicMergePatchType, []byte(`{"spec": {}}`))
    assert.EqualError(t, err, "not supported patching: Rollout")
}

func TestResourceItem(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name        string
        createFunc  func(kube.Clients, string, string) (runtime.Object, error)
        getItemFunc func(kube.Clients, string, string) (runtime.Object, error)
        deleteFunc  func(kube.Clients, string, string) error
    }{
        {
            name:        "Deployment",
            createFunc:  createTestDeploymentWithAnnotations,
            getItemFunc: callbacks.GetDeploymentItem,
            deleteFunc:  deleteTestDeployment,
        },
        {
            name:        "CronJob",
            createFunc:  createTestCronJobWithAnnotations,
            getItemFunc: callbacks.GetCronJobItem,
            deleteFunc:  deleteTestCronJob,
        },
        {
            name:        "Job",
            createFunc:  createTestJobWithAnnotations,
            getItemFunc: callbacks.GetJobItem,
            deleteFunc:  deleteTestJob,
        },
        {
            name:        "DaemonSet",
            createFunc:  createTestDaemonSetWithAnnotations,
            getItemFunc: callbacks.GetDaemonSetItem,
            deleteFunc:  deleteTestDaemonSet,
        },
        {
            name:        "StatefulSet",
            createFunc:  createTestStatefulSetWithAnnotations,
            getItemFunc: callbacks.GetStatefulSetItem,
            deleteFunc:  deleteTestStatefulSet,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            resource, err := tt.createFunc(clients, fixtures.namespace, "1")
            assert.NoError(t, err)

            accessor, err := meta.Accessor(resource)
            assert.NoError(t, err)

            _, err = tt.getItemFunc(clients, accessor.GetName(), fixtures.namespace)
            assert.NoError(t, err)

            err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
            assert.NoError(t, err)
        })
    }
}

func TestResourceItems(t *testing.T) {
    fixtures := newTestFixtures()

@@ -118,36 +185,42 @@ func TestResourceItems(t *testing.T) {
        name          string
        createFunc    func(kube.Clients, string) error
        getItemsFunc  func(kube.Clients, string) []runtime.Object
        deleteFunc    func(kube.Clients, string) error
        expectedCount int
    }{
        {
            name:          "Deployments",
            createFunc:    createTestDeployments,
            getItemsFunc:  callbacks.GetDeploymentItems,
            deleteFunc:    deleteTestDeployments,
            expectedCount: 2,
        },
        {
            name:          "CronJobs",
            createFunc:    createTestCronJobs,
            getItemsFunc:  callbacks.GetCronJobItems,
            deleteFunc:    deleteTestCronJobs,
            expectedCount: 2,
        },
        {
            name:          "Jobs",
            createFunc:    createTestJobs,
            getItemsFunc:  callbacks.GetJobItems,
            deleteFunc:    deleteTestJobs,
            expectedCount: 2,
        },
        {
            name:          "DaemonSets",
            createFunc:    createTestDaemonSets,
            getItemsFunc:  callbacks.GetDaemonSetItems,
            deleteFunc:    deleteTestDaemonSets,
            expectedCount: 2,
        },
        {
            name:          "StatefulSets",
            createFunc:    createTestStatefulSets,
            getItemsFunc:  callbacks.GetStatefulSetItems,
            deleteFunc:    deleteTestStatefulSets,
            expectedCount: 2,
        },
    }
@@ -262,10 +335,11 @@ func TestUpdateResources(t *testing.T) {
        name       string
        createFunc func(kube.Clients, string, string) (runtime.Object, error)
        updateFunc func(kube.Clients, string, runtime.Object) error
        deleteFunc func(kube.Clients, string, string) error
    }{
        {"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
        {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
        {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
        {"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment, deleteTestDeployment},
        {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet, deleteTestDaemonSet},
        {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet, deleteTestStatefulSet},
    }

    for _, tt := range tests {
@@ -275,6 +349,65 @@ func TestUpdateResources(t *testing.T) {

            err = tt.updateFunc(clients, fixtures.namespace, resource)
            assert.NoError(t, err)

            accessor, err := meta.Accessor(resource)
            assert.NoError(t, err)

            err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
            assert.NoError(t, err)
        })
    }
}

func TestPatchResources(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name       string
        createFunc func(kube.Clients, string, string) (runtime.Object, error)
        patchFunc  func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error
        deleteFunc func(kube.Clients, string, string) error
        assertFunc func(err error)
    }{
        {"Deployment", createTestDeploymentWithAnnotations, callbacks.PatchDeployment, deleteTestDeployment, func(err error) {
            assert.NoError(t, err)
            patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace)
            assert.NoError(t, err)
            assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).ObjectMeta.Annotations["test"])
        }},
        {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) {
            assert.NoError(t, err)
            patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace)
            assert.NoError(t, err)
            assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).ObjectMeta.Annotations["test"])
        }},
        {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) {
            assert.NoError(t, err)
            patchedResource, err := callbacks.GetStatefulSetItem(clients, "test-statefulset", fixtures.namespace)
            assert.NoError(t, err)
            assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).ObjectMeta.Annotations["test"])
        }},
        {"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) {
            assert.EqualError(t, err, "not supported patching: CronJob")
        }},
        {"Job", createTestJobWithAnnotations, callbacks.PatchJob, deleteTestJob, func(err error) {
            assert.EqualError(t, err, "not supported patching: Job")
        }},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            resource, err := tt.createFunc(clients, fixtures.namespace, "1")
            assert.NoError(t, err)

            err = tt.patchFunc(clients, fixtures.namespace, resource, patchtypes.StrategicMergePatchType, []byte(`{"metadata":{"annotations":{"test":"test"}}}`))
            tt.assertFunc(err)

            accessor, err := meta.Accessor(resource)
            assert.NoError(t, err)

            err = tt.deleteFunc(clients, fixtures.namespace, accessor.GetName())
            assert.NoError(t, err)
        })
    }
}
@@ -282,10 +415,26 @@ func TestUpdateResources(t *testing.T)
func TestCreateJobFromCronjob(t *testing.T) {
    fixtures := newTestFixtures()

    cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
    runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
    assert.NoError(t, err)

    err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
    cronJob := runtimeObj.(*batchv1.CronJob)
    err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob)
    assert.NoError(t, err)

    jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{})
    assert.NoError(t, err)

    ownerFound := false
    for _, job := range jobList.Items {
        if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) {
            ownerFound = true
            break
        }
    }
    assert.Truef(t, ownerFound, "Missing CronJob owner reference")

    err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name)
    assert.NoError(t, err)
}

@@ -297,6 +446,9 @@ func TestReCreateJobFromJob(t *testing.T) {

    err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
    assert.NoError(t, err)

    err = deleteTestJob(clients, fixtures.namespace, "test-job")
    assert.NoError(t, err)
}

func TestGetVolumes(t *testing.T) {
@@ -321,6 +473,24 @@ func TestGetVolumes(t *testing.T) {
    }
}

func TestGetPatchTemplateAnnotation(t *testing.T) {
    templates := callbacks.GetPatchTemplates()
    assert.NotEmpty(t, templates.AnnotationTemplate)
    assert.Equal(t, 2, strings.Count(templates.AnnotationTemplate, "%s"))
}

func TestGetPatchTemplateEnvVar(t *testing.T) {
    templates := callbacks.GetPatchTemplates()
    assert.NotEmpty(t, templates.EnvVarTemplate)
    assert.Equal(t, 3, strings.Count(templates.EnvVarTemplate, "%s"))
}

func TestGetPatchDeleteTemplateEnvVar(t *testing.T) {
    templates := callbacks.GetPatchTemplates()
    assert.NotEmpty(t, templates.DeleteEnvVarTemplate)
    assert.Equal(t, 2, strings.Count(templates.DeleteEnvVarTemplate, "%d"))
}

// Helper functions

func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
@@ -330,7 +500,7 @@ func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
func watchRollout(name, namespace string) chan interface{} {
    timeOut := int64(1)
    modifiedChan := make(chan interface{})
    watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
    watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), metav1.ListOptions{TimeoutSeconds: &timeOut})
    go watchModified(watcher, name, modifiedChan)
    return modifiedChan
}
@@ -358,6 +528,16 @@ func createTestDeployments(clients kube.Clients, namespace string) error {
    return nil
}

func deleteTestDeployments(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        err := testutil.DeleteDeployment(clients.KubernetesClient, namespace, fmt.Sprintf("test-deployment-%d", i))
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestCronJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
@@ -368,6 +548,16 @@ func createTestCronJobs(clients kube.Clients, namespace string) error {
    return nil
}

func deleteTestCronJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        err := testutil.DeleteCronJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-cron-%d", i))
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
@@ -378,6 +568,16 @@ func createTestJobs(clients kube.Clients, namespace string) error {
    return nil
}

func deleteTestJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        err := testutil.DeleteJob(clients.KubernetesClient, namespace, fmt.Sprintf("test-job-%d", i))
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestDaemonSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
@@ -388,6 +588,16 @@ func createTestDaemonSets(clients kube.Clients, namespace string) error {
    return nil
}

func deleteTestDaemonSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        err := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-daemonset-%d", i))
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestStatefulSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
@@ -398,6 +608,16 @@ func createTestStatefulSets(clients kube.Clients, namespace string) error {
    return nil
}

func deleteTestStatefulSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        err := testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, fmt.Sprintf("test-statefulset-%d", i))
        if err != nil {
            return err
        }
    }
    return nil
}

func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
    switch v := obj.(type) {
    case *appsv1.Deployment:
@@ -479,6 +699,10 @@ func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, versio
    return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}

func deleteTestDeployment(clients kube.Clients, namespace, name string) error {
    return clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    daemonSet := &appsv1.DaemonSet{
        ObjectMeta: metav1.ObjectMeta{
@@ -490,6 +714,10 @@ func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version
    return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}

func deleteTestDaemonSet(clients kube.Clients, namespace, name string) error {
    return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    statefulSet := &appsv1.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{
@@ -501,6 +729,10 @@ func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, versi
    return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}

func deleteTestStatefulSet(clients kube.Clients, namespace, name string) error {
    return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    cronJob := &batchv1.CronJob{
        ObjectMeta: metav1.ObjectMeta{
@@ -512,6 +744,10 @@ func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version s
    return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}

func deleteTestCronJob(clients kube.Clients, namespace, name string) error {
    return clients.KubernetesClient.BatchV1().CronJobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    job := &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{
@@ -522,3 +758,16 @@ func createTestJobWithAnnotations(clients kube.Clients, namespace, version strin
    }
    return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}

func deleteTestJob(clients kube.Clients, namespace, name string) error {
    return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
}

func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool {
    for _, ownerRef := range ownerRefs {
        if *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name {
            return true
        }
    }
    return false
}
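Reviewer note: one caveat on the helper above: *ownerRef.Controller panics if Controller is nil. That cannot happen for the Jobs under test, since NewControllerRef always sets the pointer, but a nil-guard would make the helper safe for arbitrary owner references. A possible hardening, not something this change does:

    if ownerRef.Controller != nil && *ownerRef.Controller &&
        ownerRef.Kind == kind && ownerRef.Name == name {
        return true
    }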
@@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "net/http"
    _ "net/http/pprof"
    "os"
    "strings"

@@ -14,12 +15,12 @@ import (
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"

    "github.com/stakater/Reloader/internal/pkg/controller"
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/common"
    "github.com/stakater/Reloader/pkg/kube"
)

@@ -33,27 +34,7 @@ func NewReloaderCommand() *cobra.Command {
    }

    // options
    cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
    cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
    cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
    cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
    cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
    cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
    cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
    cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
    cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
    cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
    cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
    cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
    cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
    cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
    cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
    cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
    cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
    cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
    cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
    cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
    cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
    util.ConfigureReloaderFlags(cmd)

    return cmd
}
@@ -122,15 +103,18 @@ func getHAEnvs() (string, string) {
}

func startReloader(cmd *cobra.Command, args []string) {
    common.GetCommandLineOptions()
    err := configureLogging(options.LogFormat, options.LogLevel)
    if err != nil {
        logrus.Warn(err)
    }

    logrus.Info("Starting Reloader")
    isGlobal := false
    currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
    if len(currentNamespace) == 0 {
        currentNamespace = v1.NamespaceAll
        isGlobal = true
        logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.")
    }
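Reviewer note: the isGlobal flag set here gates the namespace label selector in the hunk below: only a cluster-scoped Reloader builds one, while a namespaced instance watches just its own namespace and skips that branch. Summarizing the two modes as implied by this code (a reading of the diff, not an exhaustive statement of behavior):

    // KUBERNETES_NAMESPACE unset   -> currentNamespace = v1.NamespaceAll ("") and
    //                                 isGlobal = true, so --namespace-selector applies.
    // KUBERNETES_NAMESPACE="apps"  -> Reloader watches only "apps"; the namespace
    //                                 label selector branch is skipped entirely.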
@@ -140,22 +124,22 @@ func startReloader(cmd *cobra.Command, args []string) {
        logrus.Fatal(err)
    }

    ignoredResourcesList, err := getIgnoredResourcesList(cmd)
    ignoredResourcesList, err := util.GetIgnoredResourcesList()
    if err != nil {
        logrus.Fatal(err)
    }

    ignoredNamespacesList, err := getIgnoredNamespacesList(cmd)
    if err != nil {
        logrus.Fatal(err)
    ignoredNamespacesList := options.NamespacesToIgnore
    namespaceLabelSelector := ""

    if isGlobal {
        namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors)
        if err != nil {
            logrus.Fatal(err)
        }
    }

    namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
    if err != nil {
        logrus.Fatal(err)
    }

    resourceLabelSelector, err := getResourceLabelSelector(cmd)
    resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors)
    if err != nil {
        logrus.Fatal(err)
    }
@@ -207,107 +191,19 @@ func startReloader(cmd *cobra.Command, args []string) {
        go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
    }

    common.PublishMetaInfoConfigmap(clientset)

    if options.EnablePProf {
        go startPProfServer()
    }

    leadership.SetupLivenessEndpoint()
    logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}

func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
    return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}

func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
    slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
    if err != nil {
        logrus.Fatal(err)
    }

    for i, kv := range slice {
        // Legacy support for ":" as a delimiter and "*" for wildcard.
        if strings.Contains(kv, ":") {
            split := strings.Split(kv, ":")
            if split[1] == "*" {
                slice[i] = split[0]
            } else {
                slice[i] = split[0] + "=" + split[1]
            }
        }
        // Convert wildcard to valid apimachinery operator
        if strings.Contains(kv, "=") {
            split := strings.Split(kv, "=")
            if split[1] == "*" {
                slice[i] = split[0]
            }
        }
    }

    namespaceLabelSelector := strings.Join(slice[:], ",")
    _, err = labels.Parse(namespaceLabelSelector)
    if err != nil {
        logrus.Fatal(err)
    }

    return namespaceLabelSelector, nil
}
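Reviewer note: the legacy-delimiter handling above is identical for the namespace-selector and resource-label-selector flags: "key:value" becomes "key=value", and a "*" wildcard value collapses to a bare key, meaning "label exists". A self-contained sketch of that conversion, using only the logic shown above:

    package main

    import (
        "fmt"
        "strings"
    )

    // convertLegacySelectors mirrors the loop above: ":" becomes "=",
    // and a "*" value collapses to a bare key ("label exists").
    func convertLegacySelectors(slice []string) string {
        for i, kv := range slice {
            if strings.Contains(kv, ":") {
                split := strings.Split(kv, ":")
                if split[1] == "*" {
                    slice[i] = split[0]
                } else {
                    slice[i] = split[0] + "=" + split[1]
                }
            }
            if strings.Contains(kv, "=") {
                split := strings.Split(kv, "=")
                if split[1] == "*" {
                    slice[i] = split[0]
                }
            }
        }
        return strings.Join(slice, ",")
    }

    func main() {
        fmt.Println(convertLegacySelectors([]string{"team:platform", "env=*", "tier:*"}))
        // Output: team=platform,env,tier
    }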
func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
    slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
    if err != nil {
        logrus.Fatal(err)
    }

    for i, kv := range slice {
        // Legacy support for ":" as a delimiter and "*" for wildcard.
        if strings.Contains(kv, ":") {
            split := strings.Split(kv, ":")
            if split[1] == "*" {
                slice[i] = split[0]
            } else {
                slice[i] = split[0] + "=" + split[1]
            }
        }
        // Convert wildcard to valid apimachinery operator
        if strings.Contains(kv, "=") {
            split := strings.Split(kv, "=")
            if split[1] == "*" {
                slice[i] = split[0]
            }
        }
    }

    resourceLabelSelector := strings.Join(slice[:], ",")
    _, err = labels.Parse(resourceLabelSelector)
    if err != nil {
        logrus.Fatal(err)
    }

    return resourceLabelSelector, nil
}

func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
    slice, err := cmd.Flags().GetStringSlice(flag)
    if err != nil {
        return nil, err
    }

    return slice, nil
}

func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {

    ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
    if err != nil {
        return nil, err
    }

    for _, v := range ignoredResourcesList {
        if v != "configMaps" && v != "secrets" {
            return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
        }
    }

    if len(ignoredResourcesList) > 1 {
        return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
    }

    return ignoredResourcesList, nil
func startPProfServer() {
    logrus.Infof("Starting pprof server on %s", options.PProfAddr)
    if err := http.ListenAndServe(options.PProfAddr, nil); err != nil {
        logrus.Errorf("Failed to start pprof server: %v", err)
    }
}
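Reviewer note: a detail that is easy to miss here: the new blank import of net/http/pprof registers the profiling handlers on http.DefaultServeMux as a side effect, which is why startPProfServer can pass nil as the handler and still serve /debug/pprof/. A minimal standalone illustration of the same standard-library mechanism (the address is a placeholder):

    package main

    import (
        "log"
        "net/http"
        _ "net/http/pprof" // side effect: registers /debug/pprof/* on http.DefaultServeMux
    )

    func main() {
        // A nil handler means http.DefaultServeMux, where pprof just registered itself.
        log.Fatal(http.ListenAndServe("localhost:6060", nil))
    }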
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/common"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -68,64 +69,6 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(retCode)
|
||||
}
|
||||
|
||||
// Perform rolling upgrade on deploymentConfig and create pod annotation var upon updating the configmap
|
||||
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeploymentConfig(t *testing.T) {
|
||||
options.ReloadStrategy = constants.AnnotationsReloadStrategy
|
||||
|
||||
// Don't run test on non-openshift environment
|
||||
if !kube.IsOpenshift {
|
||||
return
|
||||
}
|
||||
|
||||
// Creating configmap
|
||||
configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
|
||||
configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating the configmap %v", err)
|
||||
}
|
||||
|
||||
// Creating deployment
|
||||
_, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
|
||||
if err != nil {
|
||||
t.Errorf("Error in deploymentConfig creation: %v", err)
|
||||
}
|
||||
|
||||
// Updating configmap for first time
|
||||
updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
|
||||
if updateErr != nil {
|
||||
t.Errorf("Configmap was not updated")
|
||||
}
|
||||
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
|
||||
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentConfigFuncs)
|
||||
if !updated {
|
||||
t.Errorf("DeploymentConfig was not updated")
|
||||
}
|
||||
time.Sleep(sleepDuration)
|
||||
|
||||
// Deleting deployment
|
||||
err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the deploymentConfig %v", err)
|
||||
}
|
||||
|
||||
// Deleting configmap
|
||||
err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the configmap %v", err)
|
||||
}
|
||||
time.Sleep(sleepDuration)
|
||||
}
|
||||
|
||||
// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap
|
||||
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) {
|
||||
options.ReloadStrategy = constants.AnnotationsReloadStrategy
|
||||
@@ -152,7 +95,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
@@ -205,7 +148,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
@@ -270,7 +213,7 @@ func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
@@ -329,7 +272,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been updated")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
@@ -384,7 +327,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationIn
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
@@ -448,7 +391,7 @@ func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testin
|
||||
// Verifying Upgrade
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
@@ -501,7 +444,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testin
|
||||
// Verifying Upgrade
|
||||
logrus.Infof("Verifying pod annotation has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
|
||||
config := util.Config{
|
||||
config := common.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
@@ -559,7 +502,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testin
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -610,7 +553,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -662,7 +605,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *test
	// Verifying DaemonSet update
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -725,7 +668,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.
	// Verifying DaemonSet update
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -778,7 +721,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -837,7 +780,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -888,7 +831,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDae
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -940,7 +883,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *te
	// Verifying StatefulSet update
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -999,7 +942,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testin
	// Verifying StatefulSet update
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1052,7 +995,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1078,64 +1021,6 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
	time.Sleep(sleepDuration)
}
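All of the hunks above apply the same mechanical migration: each test's verification config moves from util.Config to common.Config while its fields stay unchanged. The hunks cut off after SHAValue; spelled out in full (the Annotation field follows in the unchanged lines, as the complete function below shows), the new construction in each test reads:

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}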
// Perform rolling upgrade on deploymentConfig and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy

	// Don't run test on non-openshift environment
	if !kube.IsOpenshift {
		return
	}

	// Creating configmap
	configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
	if err != nil {
		t.Errorf("Error while creating the configmap %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeploymentConfig(clients.OpenshiftAppsClient, configmapName, namespace, true)
	if err != nil {
		t.Errorf("Error in deploymentConfig creation: %v", err)
	}

	// Updating configmap for first time
	updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com")
	if updateErr != nil {
		t.Errorf("Configmap was not updated")
	}

	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
		Annotation: options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentConfigFuncs := handler.GetDeploymentConfigRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentConfigFuncs)
	if !updated {
		t.Errorf("DeploymentConfig was not updated")
	}
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the deploymentConfig %v", err)
	}

	// Deleting configmap
	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
@@ -1162,7 +1047,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1215,7 +1100,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1280,7 +1165,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1339,7 +1224,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing
	// Verifying deployment update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1394,7 +1279,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1458,7 +1343,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1511,7 +1396,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1569,7 +1454,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1620,7 +1505,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1672,7 +1557,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1735,7 +1620,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.
	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -1788,7 +1673,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1847,7 +1732,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1898,7 +1783,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -1950,7 +1835,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	// Verifying StatefulSet update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -2009,7 +1894,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testin
	// Verifying StatefulSet update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: configmapName,
		SHAValue: shaData,
@@ -2062,7 +1947,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -2120,7 +2005,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,
@@ -2178,7 +2063,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testi
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace: namespace,
		ResourceName: secretName,
		SHAValue: shaData,

@@ -4,7 +4,7 @@ import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)
@@ -33,13 +33,13 @@ func (r ResourceCreatedHandler) Handle() error {
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
func (r ResourceCreatedHandler) GetConfig() (common.Config, string) {
	var oldSHAData string
	var config util.Config
	var config common.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}
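The GetConfig implementations assert the resource type twice: once in the condition and once in the call. A type switch expresses the same dispatch in one step; a minimal sketch using the common helpers introduced above (the error return is an illustrative alternative to the Warnf fallthrough, not the handler's actual behavior):

	func getConfig(resource interface{}) (common.Config, error) {
		switch res := resource.(type) {
		case *v1.ConfigMap:
			return common.GetConfigmapConfig(res), nil
		case *v1.Secret:
			return common.GetSecretConfig(res), nil
		default:
			return common.Config{}, fmt.Errorf("resource should be 'Secret' or 'ConfigMap', found %T", resource)
		}
	}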
@@ -1,15 +1,20 @@
package handler

import (
	"fmt"
	"slices"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	patchtypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
)

@@ -37,20 +42,20 @@ func (r ResourceDeleteHandler) Handle() error {
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
func (r ResourceDeleteHandler) GetConfig() (common.Config, string) {
	var oldSHAData string
	var config util.Config
	var config common.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}
	return config, oldSHAData
}

func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return removePodAnnotations(upgradeFuncs, item, config, autoReload)
	}
@@ -58,35 +63,38 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
	return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	config.SHAValue = testutil.GetSHAfromEmptyData()
	return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}

func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	envVar := getEnvVarName(config.ResourceName, config.Type)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
		return constants.NoContainerFound
		return InvokeStrategyResult{constants.NoContainerFound, nil}
	}

	// remove the env var if it exists
	containers := upgradeFuncs.ContainersFunc(item)
	for i := range containers {
		envs := containers[i].Env
		index := -1
		for j := range envs {
			if envs[j].Name == envVar {
				index = j
				break
			}
		}
	if len(container.Env) > 0 {
		index := slices.IndexFunc(container.Env, func(envVariable v1.EnvVar) bool {
			return envVariable.Name == envVar
		})
		if index != -1 {
			containers[i].Env = append(containers[i].Env[:index], containers[i].Env[index+1:]...)
			return constants.Updated
			var patch []byte
			if upgradeFuncs.SupportsPatch {
				containers := upgradeFuncs.ContainersFunc(item)
				containerIndex := slices.IndexFunc(containers, func(c v1.Container) bool {
					return c.Name == container.Name
				})
				patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate, containerIndex, index)
			}

			container.Env = append(container.Env[:index], container.Env[index+1:]...)
			return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.JSONPatchType, Bytes: patch}}
		}
	}

	return constants.NotUpdated
	return InvokeStrategyResult{constants.NotUpdated, nil}
}
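When SupportsPatch is set, the env-var removal is also expressed as a JSON Patch so the caller can apply it server-side instead of submitting the whole object. The exact DeleteEnvVarTemplate string lives in callbacks.GetPatchTemplates and is not shown in this diff; assuming a template of the shape below, the fmt.Appendf call with containerIndex 1 and index 3 would produce:

	[{"op": "remove", "path": "/spec/template/spec/containers/1/env/3"}]

which matches the patchtypes.JSONPatchType the result is tagged with.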
@@ -1,11 +1,9 @@
package handler

import (
	"github.com/stakater/Reloader/internal/pkg/util"
)
import "github.com/stakater/Reloader/pkg/common"

// ResourceHandler handles the creation and update of resources
type ResourceHandler interface {
	Handle() error
	GetConfig() (util.Config, string)
	GetConfig() (common.Config, string)
}
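Every concrete handler (created, updated, deleted) now satisfies this interface with common.Config. A hypothetical caller-side sketch of the contract, comparing the new SHA against the old one and only invoking Handle when something changed (this mirrors how the update handler uses the pair, not literal controller code):

	func process(h ResourceHandler) error {
		config, oldSHA := h.GetConfig()
		if config.SHAValue == oldSHA {
			return nil // data unchanged, nothing to reload
		}
		return h.Handle()
	}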
internal/pkg/handler/pause_deployment.go (new file, 242 lines)
@@ -0,0 +1,242 @@
package handler

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/pkg/kube"
	app "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	patchtypes "k8s.io/apimachinery/pkg/types"
)

// Keeps track of currently active timers
var activeTimers = make(map[string]*time.Timer)

// Returns a unique key for the activeTimers map
func getTimerKey(namespace, deploymentName string) string {
	return fmt.Sprintf("%s/%s", namespace, deploymentName)
}

// IsPaused checks if a deployment is currently paused
func IsPaused(deployment *app.Deployment) bool {
	return deployment.Spec.Paused
}

// IsPausedByReloader checks whether the deployment was paused by Reloader
func IsPausedByReloader(deployment *app.Deployment) bool {
	if IsPaused(deployment) {
		pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
		return pausedAtAnnotationValue != ""
	}
	return false
}

// GetPauseStartTime returns the time the deployment was paused by Reloader, or nil if it was not paused by Reloader
func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) {
	if !IsPausedByReloader(deployment) {
		return nil, nil
	}

	pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
	parsedTime, err := time.Parse(time.RFC3339, pausedAtStr)
	if err != nil {
		return nil, err
	}

	return &parsedTime, nil
}

// ParsePauseDuration parses the pause interval value and returns a time.Duration
func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) {
	pauseDuration, err := time.ParseDuration(pauseIntervalValue)
	if err != nil {
		logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err)
		return 0, err
	}
	return pauseDuration, nil
}
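time.ParseDuration accepts Go duration syntax, so the pause interval may combine units but must always carry one; a few illustrative values:

	time.ParseDuration("10s")   // 10s — valid
	time.ParseDuration("1h30m") // 90m0s — units can be combined
	time.ParseDuration("5")     // error — a bare number has no unit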

// PauseDeployment pauses a deployment for a specified duration and creates a timer to resume it
// after that duration has elapsed
func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) {
	deploymentName := deployment.Name
	pauseDuration, err := ParsePauseDuration(pauseIntervalValue)

	if err != nil {
		return nil, err
	}

	if !IsPaused(deployment) {
		logrus.Infof("Pausing Deployment '%s' in namespace '%s' for %s", deploymentName, namespace, pauseDuration)

		deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

		pausePatch, err := CreatePausePatch()
		if err != nil {
			logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err)
			return deployment, err
		}

		err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch)

		if err != nil {
			logrus.Errorf("Failed to patch deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
			return deployment, err
		}

		updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})

		CreateResumeTimer(deployment, clients, namespace, pauseDuration)
		return updatedDeployment, err
	}

	if !IsPausedByReloader(deployment) {
		logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace)
		return deployment, nil
	}

	// Deployment has already been paused by reloader, check for timer
	logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace)

	timerKey := getTimerKey(namespace, deploymentName)
	_, timerExists := activeTimers[timerKey]

	if !timerExists {
		logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one",
			deploymentName, namespace)
		HandleMissingTimer(deployment, pauseDuration, clients, namespace)
	}
	return deployment, nil
}
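A minimal usage sketch with a fake clientset, mirroring the tests later in this diff (the deployment object is assumed to already exist in the fake cluster):

	clients := kube.Clients{KubernetesClient: testclient.NewSimpleClientset(deployment)}
	updated, err := PauseDeployment(deployment, clients, "default", "5m")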

// HandleMissingTimer handles a missing resume timer for a deployment that was paused by Reloader,
// which can occur after a new leader election or a Reloader restart. For example, a 5m pause that
// started 6m ago has already expired, so the deployment is resumed immediately.
func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) {
	deploymentName := deployment.Name
	pauseStartTime, err := GetPauseStartTime(deployment)
	if err != nil {
		logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. Resuming deployment immediately",
			deploymentName, namespace, err)
		ResumeDeployment(deployment, namespace, clients)
		return
	}

	if pauseStartTime == nil {
		return
	}

	elapsedPauseTime := time.Since(*pauseStartTime)
	remainingPauseTime := pauseDuration - elapsedPauseTime

	if remainingPauseTime <= 0 {
		logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. Resuming immediately",
			deploymentName, namespace)
		ResumeDeployment(deployment, namespace, clients)
		return
	}

	logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s",
		deploymentName, namespace, remainingPauseTime)
	CreateResumeTimer(deployment, clients, namespace, remainingPauseTime)
}

// CreateResumeTimer creates a timer to resume the deployment after the specified duration
func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) {
	deploymentName := deployment.Name
	timerKey := getTimerKey(namespace, deployment.Name)

	// Check if there's an existing timer for this deployment
	if _, exists := activeTimers[timerKey]; exists {
		logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', skipping creation",
			deploymentName, namespace)
		return
	}

	// Create and store the new timer
	timer := time.AfterFunc(pauseDuration, func() {
		ResumeDeployment(deployment, namespace, clients)
	})

	// Add the new timer to the map
	activeTimers[timerKey] = timer

	logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s",
		deploymentName, namespace, pauseDuration)
}

// ResumeDeployment resumes a deployment that has been paused by reloader
func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) {
	deploymentName := deployment.Name

	currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})

	if err != nil {
		logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
		return
	}

	if !IsPausedByReloader(currentDeployment) {
		logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. Skipping resume", deploymentName, namespace)
		return
	}

	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	resumePatch, err := CreateResumePatch()
	if err != nil {
		logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err)
		return
	}

	// Remove the timer
	timerKey := getTimerKey(namespace, deploymentName)
	if timer, exists := activeTimers[timerKey]; exists {
		timer.Stop()
		delete(activeTimers, timerKey)
		logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace)
	}

	err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch)

	if err != nil {
		logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
		return
	}

	logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace)
}

func CreatePausePatch() ([]byte, error) {
	patchData := map[string]interface{}{
		"spec": map[string]interface{}{
			"paused": true,
		},
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
			},
		},
	}

	return json.Marshal(patchData)
}

func CreateResumePatch() ([]byte, error) {
	patchData := map[string]interface{}{
		"spec": map[string]interface{}{
			"paused": false,
		},
		"metadata": map[string]interface{}{
			"annotations": map[string]interface{}{
				options.PauseDeploymentTimeAnnotation: nil,
			},
		},
	}

	return json.Marshal(patchData)
}
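Marshaled, the two patches look like this; in a strategic merge patch an explicit null deletes the key, which is how the resume patch clears the pause-time annotation (the literal key is whatever options.PauseDeploymentTimeAnnotation resolves to):

	{"metadata":{"annotations":{"<pause-time-annotation>":"2025-01-01T00:00:00Z"}},"spec":{"paused":true}}
	{"metadata":{"annotations":{"<pause-time-annotation>":null}},"spec":{"paused":false}}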
internal/pkg/handler/pause_deployment_test.go (new file, 392 lines)
@@ -0,0 +1,392 @@
package handler

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/pkg/kube"
	"github.com/stretchr/testify/assert"
	app "k8s.io/api/apps/v1"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	testclient "k8s.io/client-go/kubernetes/fake"
)

func TestIsPaused(t *testing.T) {
	tests := []struct {
		name       string
		deployment *appsv1.Deployment
		paused     bool
	}{
		{
			name: "paused deployment",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
			},
			paused: true,
		},
		{
			name: "unpaused deployment",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: false,
				},
			},
			paused: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result := IsPaused(test.deployment)
			assert.Equal(t, test.paused, result)
		})
	}
}

func TestIsPausedByReloader(t *testing.T) {
	tests := []struct {
		name             string
		deployment       *appsv1.Deployment
		pausedByReloader bool
	}{
		{
			name: "paused by reloader",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
					},
				},
			},
			pausedByReloader: true,
		},
		{
			name: "not paused by reloader",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{},
				},
			},
			pausedByReloader: false,
		},
		{
			name: "not paused",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: false,
				},
			},
			pausedByReloader: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			pausedByReloader := IsPausedByReloader(test.deployment)
			assert.Equal(t, test.pausedByReloader, pausedByReloader)
		})
	}
}

func TestGetPauseStartTime(t *testing.T) {
	now := time.Now()
	nowStr := now.Format(time.RFC3339)

	tests := []struct {
		name              string
		deployment        *appsv1.Deployment
		pausedByReloader  bool
		expectedStartTime time.Time
	}{
		{
			name: "valid pause time",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						options.PauseDeploymentTimeAnnotation: nowStr,
					},
				},
			},
			pausedByReloader:  true,
			expectedStartTime: now,
		},
		{
			name: "not paused by reloader",
			deployment: &appsv1.Deployment{
				Spec: appsv1.DeploymentSpec{
					Paused: false,
				},
			},
			pausedByReloader: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualStartTime, err := GetPauseStartTime(test.deployment)

			assert.NoError(t, err)

			if !test.pausedByReloader {
				assert.Nil(t, actualStartTime)
			} else {
				assert.NotNil(t, actualStartTime)
				assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second)
			}
		})
	}
}

func TestParsePauseDuration(t *testing.T) {
	tests := []struct {
		name               string
		pauseIntervalValue string
		expectedDuration   time.Duration
		invalidDuration    bool
	}{
		{
			name:               "valid duration",
			pauseIntervalValue: "10s",
			expectedDuration:   10 * time.Second,
			invalidDuration:    false,
		},
		{
			name:               "valid minute duration",
			pauseIntervalValue: "2m",
			expectedDuration:   2 * time.Minute,
			invalidDuration:    false,
		},
		{
			name:               "invalid duration",
			pauseIntervalValue: "invalid",
			expectedDuration:   0,
			invalidDuration:    true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualDuration, err := ParsePauseDuration(test.pauseIntervalValue)

			if test.invalidDuration {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, test.expectedDuration, actualDuration)
			}
		})
	}
}

func TestHandleMissingTimerSimple(t *testing.T) {
	tests := []struct {
		name           string
		deployment     *appsv1.Deployment
		shouldBePaused bool // expected paused state after HandleMissingTimer runs
	}{
		{
			name: "deployment paused by reloader, pause period has expired and no timer",
			deployment: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-deployment-1",
					Annotations: map[string]string{
						options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339),
						options.PauseDeploymentAnnotation:     "5m",
					},
				},
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
			},
			shouldBePaused: false,
		},
		{
			name: "deployment paused by reloader, pause period expires in the future and no timer",
			deployment: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-deployment-2",
					Annotations: map[string]string{
						options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339),
						options.PauseDeploymentAnnotation:     "5m",
					},
				},
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
			},
			shouldBePaused: true,
		},
	}

	for _, test := range tests {
		// Clean up any timers at the end of the test
		defer func() {
			for key, timer := range activeTimers {
				timer.Stop()
				delete(activeTimers, key)
			}
		}()

		t.Run(test.name, func(t *testing.T) {
			fakeClient := testclient.NewSimpleClientset()
			clients := kube.Clients{
				KubernetesClient: fakeClient,
			}

			_, err := fakeClient.AppsV1().Deployments("default").Create(
				context.TODO(),
				test.deployment,
				metav1.CreateOptions{})
			assert.NoError(t, err, "Expected no error when creating deployment")

			pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation])
			HandleMissingTimer(test.deployment, pauseDuration, clients, "default")

			updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{})

			assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused,
				"Deployment should have correct paused state after timer expiration")

			if test.shouldBePaused {
				pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
				assert.NotEmpty(t, pausedAtAnnotationValue,
					"Pause annotation should be present and contain a value when deployment is paused")
			}
		})
	}
}

func TestPauseDeployment(t *testing.T) {
	tests := []struct {
		name               string
		deployment         *appsv1.Deployment
		expectedError      bool
		expectedPaused     bool
		expectedAnnotation bool // Should have pause time annotation
		pauseInterval      string
	}{
		{
			name: "deployment without pause annotation",
			deployment: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "test-deployment",
					Annotations: map[string]string{},
				},
				Spec: appsv1.DeploymentSpec{
					Paused: false,
				},
			},
			expectedError:      true,
			expectedPaused:     false,
			expectedAnnotation: false,
			pauseInterval:      "",
		},
		{
			name: "deployment already paused but not by reloader",
			deployment: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-deployment",
					Annotations: map[string]string{
						options.PauseDeploymentAnnotation: "5m",
					},
				},
				Spec: appsv1.DeploymentSpec{
					Paused: true,
				},
			},
			expectedError:      false,
			expectedPaused:     true,
			expectedAnnotation: false,
			pauseInterval:      "5m",
		},
		{
			name: "deployment unpaused that needs to be paused by reloader",
			deployment: &appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-deployment-3",
					Annotations: map[string]string{
						options.PauseDeploymentAnnotation: "5m",
					},
				},
				Spec: appsv1.DeploymentSpec{
					Paused: false,
				},
			},
			expectedError:      false,
			expectedPaused:     true,
			expectedAnnotation: true,
			pauseInterval:      "5m",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			fakeClient := testclient.NewSimpleClientset()
			clients := kube.Clients{
				KubernetesClient: fakeClient,
			}

			_, err := fakeClient.AppsV1().Deployments("default").Create(
				context.TODO(),
				test.deployment,
				metav1.CreateOptions{})
			assert.NoError(t, err, "Expected no error when creating deployment")

			updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval)
			if test.expectedError {
				assert.Error(t, err, "Expected an error pausing the deployment")
				return
			} else {
				assert.NoError(t, err, "Expected no error pausing the deployment")
			}

			assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused,
				"Deployment should have correct paused state after pause")

			if test.expectedAnnotation {
				pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
				assert.NotEmpty(t, pausedAtAnnotationValue,
					"Pause annotation should be present and contain a value when deployment is paused")
			} else {
				pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
				assert.Empty(t, pausedAtAnnotationValue,
					"Pause annotation should not be present when deployment has not been paused by reloader")
			}
		})
	}
}

// Simple helper function for test cases
func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*app.Deployment, error) {
	for _, deployment := range deployments {
		accessor, err := meta.Accessor(deployment)
		if err != nil {
			return nil, fmt.Errorf("error getting accessor for item: %v", err)
		}
		if accessor.GetName() == deploymentName {
			deploymentObj, ok := deployment.(*app.Deployment)
			if !ok {
				return nil, fmt.Errorf("failed to cast to Deployment")
			}
			return deploymentObj, nil
		}
	}
	return nil, fmt.Errorf("deployment '%s' not found", deploymentName)
}
@@ -5,6 +5,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)
@@ -36,15 +37,15 @@ func (r ResourceUpdatedHandler) Handle() error {
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
	var oldSHAData string
	var config util.Config
	var config common.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}

@@ -7,9 +7,6 @@ import (
	"fmt"
	"io"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/parnurzeal/gorequest"
	"github.com/prometheus/client_golang/prometheus"
@@ -20,112 +17,128 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
	app "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	patchtypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/retry"
)

// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetDeploymentItem,
		ItemsFunc: callbacks.GetDeploymentItems,
		AnnotationsFunc: callbacks.GetDeploymentAnnotations,
		PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
		ContainersFunc: callbacks.GetDeploymentContainers,
		InitContainersFunc: callbacks.GetDeploymentInitContainers,
		UpdateFunc: callbacks.UpdateDeployment,
		PatchFunc: callbacks.PatchDeployment,
		PatchTemplatesFunc: callbacks.GetPatchTemplates,
		VolumesFunc: callbacks.GetDeploymentVolumes,
		ResourceType: "Deployment",
		SupportsPatch: true,
	}
}

// GetCronJobCreateJobFuncs returns all callback funcs for a cronjob
func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetCronJobItem,
		ItemsFunc: callbacks.GetCronJobItems,
		AnnotationsFunc: callbacks.GetCronJobAnnotations,
		PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations,
		ContainersFunc: callbacks.GetCronJobContainers,
		InitContainersFunc: callbacks.GetCronJobInitContainers,
		UpdateFunc: callbacks.CreateJobFromCronjob,
		PatchFunc: callbacks.PatchCronJob,
		PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
		VolumesFunc: callbacks.GetCronJobVolumes,
		ResourceType: "CronJob",
		SupportsPatch: false,
	}
}

// GetJobCreateJobFuncs returns all callback funcs for a job
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetJobItem,
		ItemsFunc: callbacks.GetJobItems,
		AnnotationsFunc: callbacks.GetJobAnnotations,
		PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
		ContainersFunc: callbacks.GetJobContainers,
		InitContainersFunc: callbacks.GetJobInitContainers,
		UpdateFunc: callbacks.ReCreateJobFromjob,
		PatchFunc: callbacks.PatchJob,
		PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
		VolumesFunc: callbacks.GetJobVolumes,
		ResourceType: "Job",
		SupportsPatch: false,
	}
}

// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetDaemonSetItem,
		ItemsFunc: callbacks.GetDaemonSetItems,
		AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
		PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
		ContainersFunc: callbacks.GetDaemonSetContainers,
		InitContainersFunc: callbacks.GetDaemonSetInitContainers,
		UpdateFunc: callbacks.UpdateDaemonSet,
		PatchFunc: callbacks.PatchDaemonSet,
		PatchTemplatesFunc: callbacks.GetPatchTemplates,
		VolumesFunc: callbacks.GetDaemonSetVolumes,
		ResourceType: "DaemonSet",
		SupportsPatch: true,
	}
}

// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetStatefulSetItem,
		ItemsFunc: callbacks.GetStatefulSetItems,
		AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
		PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
		ContainersFunc: callbacks.GetStatefulSetContainers,
		InitContainersFunc: callbacks.GetStatefulSetInitContainers,
		UpdateFunc: callbacks.UpdateStatefulSet,
		PatchFunc: callbacks.PatchStatefulSet,
		PatchTemplatesFunc: callbacks.GetPatchTemplates,
		VolumesFunc: callbacks.GetStatefulSetVolumes,
		ResourceType: "StatefulSet",
	}
}

// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemsFunc: callbacks.GetDeploymentConfigItems,
		AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
		PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
		ContainersFunc: callbacks.GetDeploymentConfigContainers,
		InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
		UpdateFunc: callbacks.UpdateDeploymentConfig,
		VolumesFunc: callbacks.GetDeploymentConfigVolumes,
		ResourceType: "DeploymentConfig",
		SupportsPatch: true,
	}
}

// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemFunc: callbacks.GetRolloutItem,
		ItemsFunc: callbacks.GetRolloutItems,
		AnnotationsFunc: callbacks.GetRolloutAnnotations,
		PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
		ContainersFunc: callbacks.GetRolloutContainers,
		InitContainersFunc: callbacks.GetRolloutInitContainers,
		UpdateFunc: callbacks.UpdateRollout,
		PatchFunc: callbacks.PatchRollout,
		PatchTemplatesFunc: func() callbacks.PatchTemplates { return callbacks.PatchTemplates{} },
		VolumesFunc: callbacks.GetRolloutVolumes,
		ResourceType: "Rollout",
		SupportsPatch: false,
	}
}
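These bundles keep the upgrade path resource-agnostic: callers only ever go through the RollingUpgradeFuncs fields, never a concrete type. A minimal sketch of that pattern, using only signatures visible in this file:

	funcs := GetDeploymentRollingUpgradeFuncs()
	for _, item := range funcs.ItemsFunc(clients, namespace) {
		if err := funcs.UpdateFunc(clients, namespace, item); err != nil {
			logrus.Errorf("update failed: %v", err)
		}
	}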
|
||||
|
||||
func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
|
||||
func sendUpgradeWebhook(config common.Config, webhookUrl string) error {
|
||||
logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
|
||||
config.ResourceName, config.Type, config.Namespace, webhookUrl)
|
||||
|
||||
@@ -156,7 +169,7 @@ func sendWebhook(url string) (string, []error) {
|
||||
return buffer.String(), nil
|
||||
}
|
||||
|
||||
func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
|
||||
func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
|
||||
clients := kube.GetClients()
|
||||
|
||||
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
|
||||
@@ -180,13 +193,6 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
|
||||
return err
|
||||
}
|
||||
|
||||
if kube.IsOpenshift {
|
||||
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder, invoke)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if options.IsArgoRollouts == "true" {
|
||||
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke)
|
||||
if err != nil {
|
||||
@@ -197,8 +203,7 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
|
||||
return nil
|
||||
}
|
||||
|
||||
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
|
||||
|
||||
func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
|
||||
err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
|
||||
@@ -207,126 +212,124 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
|
||||
}
|
||||
|
||||
// PerformAction invokes the deployment if there is any change in configmap or secret data
|
||||
func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
|
||||
func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
|
||||
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
|
||||
|
||||
for _, i := range items {
|
||||
// find correct annotation and update the resource
|
||||
annotations := upgradeFuncs.AnnotationsFunc(i)
|
||||
annotationValue, found := annotations[config.Annotation]
|
||||
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
|
||||
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
|
||||
typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
|
||||
excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
|
||||
excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
|
||||
|
||||
if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
|
||||
annotations = upgradeFuncs.PodAnnotationsFunc(i)
|
||||
annotationValue = annotations[config.Annotation]
|
||||
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
|
||||
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
|
||||
typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
|
||||
}
|
||||
|
||||
isResourceExcluded := false
|
||||
|
||||
switch config.Type {
|
||||
case constants.ConfigmapEnvVarPostfix:
|
||||
if foundExcludeConfigmap {
|
||||
isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
|
||||
}
|
||||
		case constants.SecretEnvVarPostfix:
			if foundExcludeSecret {
				isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
			}
		}

		if isResourceExcluded {
			continue
		}

		result := constants.NotUpdated
		reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
		typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
		if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
			result = strategy(upgradeFuncs, i, config, true)
		}

		if result != constants.Updated && annotationValue != "" {
			values := strings.Split(annotationValue, ",")
			for _, value := range values {
				value = strings.TrimSpace(value)
				re := regexp.MustCompile("^" + value + "$")
				if re.Match([]byte(config.ResourceName)) {
					result = strategy(upgradeFuncs, i, config, false)
					if result == constants.Updated {
						break
					}
				}
			}
		}

		if result != constants.Updated && searchAnnotationValue == "true" {
			matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
			if matchAnnotationValue == "true" {
				result = strategy(upgradeFuncs, i, config, true)
			}
		}

		if result == constants.Updated {
			accessor, err := meta.Accessor(i)
			if err != nil {
				return err
			}
			resourceName := accessor.GetName()
			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
			if err != nil {
				message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)

				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
				collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
				if recorder != nil {
					recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
				}
				return err
			} else {
				message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)

				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)

				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
				collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
				alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
				if recorder != nil {
					recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
				}
				if ok && alert_on_reload == "true" {
					msg := fmt.Sprintf(
						"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
						config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
					alert.SendWebhookAlert(msg)
				}
			}

	for _, item := range items {
		err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error {
			return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource)
		})
		if err != nil {
			return err
		}
	}

	return nil
}
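Each workload item above is retried independently. For orientation, a minimal sketch of the backoff that client-go's retry.DefaultRetry is documented to provide; the authoritative value lives in k8s.io/client-go/util/retry:

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// Sketch only, assuming client-go's documented defaults: up to five attempts,
// a constant 10ms delay between them, plus up to 10% random jitter.
var defaultRetrySketch = wait.Backoff{
	Steps:    5,
	Duration: 10 * time.Millisecond,
	Factor:   1.0,
	Jitter:   0.1,
}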
func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
	if excludedResources == "" {
		return false
	}

	excludedResourcesList := strings.Split(excludedResources, ",")
	for _, excludedResource := range excludedResourcesList {
		if strings.TrimSpace(excludedResource) == resourceName {
			return true
		}
	}

	return false
}

func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
	var lastError error
	fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		err := fn(fetchResource)
		fetchResource = true
		switch {
		case err == nil:
			return true, nil
		case apierrors.IsConflict(err):
			lastError = err
			return false, nil
		default:
			return false, err
		}
	})
	if wait.Interrupted(err) {
		err = lastError
	}
	return err
}
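retryOnConflict mirrors client-go's retry.RetryOnConflict, with one twist: the callback is told whether it should re-fetch the resource, so the first attempt can reuse the object already returned by ItemsFunc. A minimal usage sketch (doUpdate is a hypothetical callback):

err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error {
	// fetchResource is false on the first attempt and true on every retry,
	// signalling that the cached object may be stale after a conflict.
	return doUpdate(fetchResource)
})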
func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
	accessor, err := meta.Accessor(resource)
	if err != nil {
		return err
	}

	resourceName := accessor.GetName()
	if fetchResource {
		resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace)
		if err != nil {
			return err
		}
	}
	annotations := upgradeFuncs.AnnotationsFunc(resource)
	podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource)
	result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions())

	if !result.ShouldReload {
		logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
		return nil
	}

	strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload)

	if strategyResult.Result != constants.Updated {
		return nil
	}

	// find correct annotation and update the resource
	pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation]

	if foundPauseInterval {
		deployment, ok := resource.(*app.Deployment)
		if !ok {
			logrus.Warnf("Annotation '%s' only applicable for deployments", options.PauseDeploymentAnnotation)
		} else {
			_, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval)
			if err != nil {
				logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err)
				return err
			}
		}
	}

	if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
		err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
	} else {
		err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
	}

	if err != nil {
		message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
		logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)

		collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
		collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
		if recorder != nil {
			recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message)
		}
		return err
	} else {
		message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
		message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)

		logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)

		collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
		collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
		alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
		if recorder != nil {
			recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message)
		}
		if ok && alert_on_reload == "true" {
			msg := fmt.Sprintf(
				"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
				config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
			alert.SendWebhookAlert(msg)
		}
	}

	return nil
}
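When a workload's callbacks advertise SupportsPatch, upgradeResource sends the pre-built patch instead of a full object update. Going by the assertions in the tests further down, the annotations-strategy payload looks roughly like this (the hash and source fields here are illustrative, not the exact wire format):

// Illustrative shape only; the real bytes come from PatchTemplatesFunc().AnnotationTemplate.
const exampleAnnotationPatch = `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":"{\"hash\":\"3c9a89...\"}"}}}}}`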
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {

@@ -400,7 +403,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
	return nil
}

func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container {
	volumes := upgradeFuncs.VolumesFunc(item)
	containers := upgradeFuncs.ContainersFunc(item)
	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -414,7 +417,11 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
		container = getContainerWithVolumeMount(initContainers, volumeMountName)
		if container != nil {
			// if configmap/secret is being used in init container then return the first Pod container to save reloader env
			return &containers[0]
			if len(containers) > 0 {
				return &containers[0]
			}
			// No containers available, return nil to avoid crash
			return nil
		}
	} else if container != nil {
		return container
@@ -427,54 +434,71 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
		container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
		if container != nil {
			// if configmap/secret is being used in init container then return the first Pod container to save reloader env
			return &containers[0]
			if len(containers) > 0 {
				return &containers[0]
			}
			// No containers available, return nil to avoid crash
			return nil
		}
	}

	// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
	if container == nil && !autoReload {
		return &containers[0]
		if len(containers) > 0 {
			return &containers[0]
		}
		// No containers available, return nil to avoid crash
		return nil
	}

	return container
}

type Patch struct {
	Type  patchtypes.PatchType
	Bytes []byte
}

type InvokeStrategyResult struct {
	Result constants.Result
	Patch  *Patch
}

type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult

func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
	}

	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}
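Which of the two strategies runs is a global choice. A short sketch of flipping it, exactly as the tests below do before each scenario:

// The tests select a strategy by assigning the global option directly:
options.ReloadStrategy = constants.AnnotationsReloadStrategy // roll out by stamping pod-template annotations
// ...or...
options.ReloadStrategy = constants.EnvVarsReloadStrategy // roll out by injecting/refreshing a container env var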
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
	if container == nil {
		return constants.NoContainerFound
		return InvokeStrategyResult{constants.NoContainerFound, nil}
	}

	// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
	// Note: the data on this struct is purely informational and is not used for future updates
	reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
	annotations, err := createReloadedAnnotations(&reloadSource)
	reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name})
	annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
	if err != nil {
		logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
		return constants.NotUpdated
		return InvokeStrategyResult{constants.NotUpdated, nil}
	}

	// Copy all the annotations to the item's annotations
	pa := upgradeFuncs.PodAnnotationsFunc(item)
	if pa == nil {
		return constants.NotUpdated
		return InvokeStrategyResult{constants.NotUpdated, nil}
	}

	for k, v := range annotations {
		pa[k] = v
	}

	return constants.Updated
	return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}

func getReloaderAnnotationKey() string {
@@ -484,9 +508,9 @@ func getReloaderAnnotationKey() string {
	)
}

func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
	if target == nil {
		return nil, errors.New("target is required")
		return nil, nil, errors.New("target is required")
	}

	// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
@@ -498,53 +522,76 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er

	lastReloadedResource, err := json.Marshal(target)
	if err != nil {
		return nil, err
		return nil, nil, err
	}

	annotations[lastReloadedResourceName] = string(lastReloadedResource)
	return annotations, nil

	var patch []byte
	if upgradeFuncs.SupportsPatch {
		escapedValue, err := jsonEscape(annotations[lastReloadedResourceName])
		if err != nil {
			return nil, nil, err
		}
		patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().AnnotationTemplate, lastReloadedResourceName, escapedValue)
	}

	return annotations, patch, nil
}
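The single annotation produced here is keyed reloader.stakater.com/last-reloaded-from (per the test assertions below), and its value is the JSON-marshalled reload source. An illustrative value, with the field names assumed rather than taken from the struct definition:

// Illustrative only; the exact field set is whatever common.ReloadSource marshals to.
// "reloader.stakater.com/last-reloaded-from": "{\"type\":\"CONFIGMAP\",\"name\":\"my-config\",\"hash\":\"3c9a89...\"}"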
func getEnvVarName(resourceName string, typeName string) string {
	return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}
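As a concrete example, assuming constants.EnvVarPrefix is "STAKATER_" and util.ConvertToEnvVarName upper-cases the name and maps '-' to '_', a ConfigMap named my-config would yield:

// getEnvVarName("my-config", "CONFIGMAP") == "STAKATER_MY_CONFIG_CONFIGMAP" (under the assumptions above)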
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	var result constants.Result
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	envVar := getEnvVarName(config.ResourceName, config.Type)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
		return constants.NoContainerFound
		return InvokeStrategyResult{constants.NoContainerFound, nil}
	}

	// update if env var exists
	result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
	updateResult := updateEnvVar(container, envVar, config.SHAValue)

	// if no existing env var exists, create one
	if result == constants.NoEnvVarFound {
	if updateResult == constants.NoEnvVarFound {
		e := v1.EnvVar{
			Name:  envVar,
			Value: config.SHAValue,
		}
		container.Env = append(container.Env, e)
		result = constants.Updated
		updateResult = constants.Updated
	}
	return result

	var patch []byte
	if upgradeFuncs.SupportsPatch {
		patch = fmt.Appendf(nil, upgradeFuncs.PatchTemplatesFunc().EnvVarTemplate, container.Name, envVar, config.SHAValue)
	}

	return InvokeStrategyResult{updateResult, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}

func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
			if envs[j].Name == envVar {
				if envs[j].Value != shaData {
					envs[j].Value = shaData
					return constants.Updated
				}
				return constants.NotUpdated
func updateEnvVar(container *v1.Container, envVar string, shaData string) constants.Result {
	envs := container.Env
	for j := range envs {
		if envs[j].Name == envVar {
			if envs[j].Value != shaData {
				envs[j].Value = shaData
				return constants.Updated
			}
			return constants.NotUpdated
		}
	}

	return constants.NoEnvVarFound
}
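For the env-vars strategy the patch payload targets the chosen container's env list. Judging by the ERS test assertions below, it is shaped roughly like this (the container name and hash are illustrative):

// Illustrative shape only; the real bytes come from PatchTemplatesFunc().EnvVarTemplate.
const exampleEnvVarPatch = `{"spec":{"template":{"spec":{"containers":[{"name":"app","env":[{"name":"STAKATER_MY_CONFIG_CONFIGMAP","value":"3c9a89..."}]}]}}}}`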
func jsonEscape(toEscape string) (string, error) {
	bytes, err := json.Marshal(toEscape)
	if err != nil {
		return "", err
	}
	escaped := string(bytes)
	return escaped[1 : len(escaped)-1], nil
}
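json.Marshal of a plain string returns it quoted and escaped, so slicing off the first and last byte leaves just the escaped body. For example:

// jsonEscape(`say "hi"`) returns `say \"hi\"` with a nil error.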
@@ -7,6 +7,7 @@ import (
	"testing"
	"time"

	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	"github.com/prometheus/client_golang/prometheus"
	promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/sirupsen/logrus"
@@ -16,10 +17,15 @@ import (
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	patchtypes "k8s.io/apimachinery/pkg/types"
	testclient "k8s.io/client-go/kubernetes/fake"
)

@@ -49,6 +55,9 @@ var (
	arsConfigmapWithConfigMapAutoAnnotation    = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
	arsSecretWithExcludeSecretAnnotation       = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
	arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
	arsConfigmapWithIgnoreAnnotation           = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
	arsSecretWithIgnoreAnnotation              = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
	arsConfigmapWithPausedDeployment           = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)

	ersNamespace     = "test-handler-" + testutil.RandSeq(5)
	ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
@@ -72,6 +81,9 @@ var (
	ersConfigmapWithConfigMapAutoAnnotation    = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5)
	ersSecretWithSecretExcludeAnnotation       = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
	ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5)
	ersConfigmapWithIgnoreAnnotation           = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
	ersSecretWithIgnoreAnnotation              = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5)
	ersConfigmapWithPausedDeployment           = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5)
)
func TestMain(m *testing.M) {
@@ -200,6 +212,12 @@ func setupArs() {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating configmap for testing pausing deployments
	_, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret used with secret auto annotation
	_, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data)
	if err != nil {
@@ -212,6 +230,35 @@ func setupArs() {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating configmap with ignore annotation
	_, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}
	// Patch with ignore annotation
	cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace)
	patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
	_, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})

	// Creating secret with ignore annotation
	_, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}
	secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace)
	_, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})

	// Creating Deployment referencing configmap with ignore annotation
	_, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
	}
	// Creating Deployment referencing secret with ignore annotation
	_, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
	}

	// Creating Deployment with configmap
	_, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true)
	if err != nil {
@@ -421,6 +468,12 @@ func setupArs() {
	if err != nil {
		logrus.Errorf("Error in Deployment with both annotations: %v", err)
	}

	// Creating Deployment with pause annotation
	_, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
	}
}

func teardownArs() {
@@ -622,6 +675,12 @@ func teardownArs() {
		logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
	}

	// Deleting Deployment with pause annotation
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Configmap
	err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName)
	if err != nil {
@@ -652,7 +711,7 @@ func teardownArs() {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	// Deleting Configmap used projected volume in init containers
	// Deleting secret used in projected volume in init containers
	err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer)
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
@@ -735,6 +794,12 @@ func teardownArs() {
		logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err)
	}

	// Deleting configmap for testing pausing deployments
	err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap: %v", err)
	}

	// Deleting namespace
	testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient)

@@ -794,6 +859,12 @@ func setupErs() {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating configmap for testing pausing deployments
	_, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating secret
	_, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data)
	if err != nil {
@@ -851,6 +922,34 @@ func setupErs() {
		logrus.Errorf("Error in configmap creation: %v", err)
	}

	// Creating configmap with ignore annotation
	_, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com")
	if err != nil {
		logrus.Errorf("Error in configmap creation: %v", err)
	}
	cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace)
	patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`)
	_, _ = cmClient.Patch(context.TODO(), ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})

	// Creating secret with ignore annotation
	_, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data)
	if err != nil {
		logrus.Errorf("Error in secret creation: %v", err)
	}
	secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace)
	_, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{})

	// Creating Deployment referencing configmap with ignore annotation
	_, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err)
	}
	// Creating Deployment referencing secret with ignore annotation
	_, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true)
	if err != nil {
		logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err)
	}

	// Creating Deployment with configmap
	_, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
	if err != nil {
@@ -970,6 +1069,12 @@ func setupErs() {
		logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err)
	}

	// Creating Deployment with pause annotation
	_, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false)
	if err != nil {
		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
	}

	// Creating DaemonSet with configmap
	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true)
	if err != nil {
@@ -1254,6 +1359,12 @@ func teardownErs() {
		logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError)
	}

	// Deleting Deployment for testing pausing deployments
	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
	if deploymentError != nil {
		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
	}

	// Deleting Configmap
	err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName)
	if err != nil {
@@ -1284,7 +1395,7 @@ func teardownErs() {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	// Deleting Configmap used projected volume in init containers
	// Deleting secret used in projected volume in init containers
	err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer)
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
@@ -1367,18 +1478,24 @@ func teardownErs() {
		logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", err)
	}

	// Deleting ConfigMap for testing pausing deployments
	err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap: %v", err)
	}

	// Deleting namespace
	testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient)

}
func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) util.Config {
func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config {
	ns := ersNamespace
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		ns = arsNamespace
	}

	return util.Config{
	return common.Config{
		Namespace:    ns,
		ResourceName: name,
		SHAValue:     shaData,
@@ -1395,7 +1512,7 @@ func getCollectors() metrics.Collectors {

var labelSucceeded = prometheus.Labels{"success": "true"}
var labelFailed = prometheus.Labels{"success": "false"}

func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
@@ -1413,6 +1530,22 @@ func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Client
	}
}

func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
	upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		return nil
	}
	upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}
	if err != nil {
		t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1422,6 +1555,18 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	itemCalled := 0
	itemsCalled := 0

	deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDeploymentItem(client, namespace, name)
	}
	deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDeploymentItems(client, namespace)
	}

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
@@ -1441,9 +1586,68 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) {
	if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 {
		t.Errorf("Counter by namespace was not increased")
	}

	assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
	assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")

	testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	assert.True(t, deploymentFuncs.SupportsPatch)
	assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate)

	itemCalled := 0
	itemsCalled := 0

	deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDeploymentItem(client, namespace, name)
	}
	deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDeploymentItems(client, namespace)
	}

	patchCalled := 0
	deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
		assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`)
		return nil
	}

	deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()
	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
	}

	assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
	assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
	assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")

	deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
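The simulated first-attempt failure above is exactly what retryOnConflict keys on: apierrors.IsConflict recognizes a StatusError with reason Conflict, so the item is re-fetched once (itemCalled == 1) and the patch is re-sent (patchCalled == 2). A minimal sketch of that check:

conflict := &apierrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}}
_ = apierrors.IsConflict(conflict) // true, so the backoff loop schedules one more attempt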
func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -1616,7 +1820,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
		t.Errorf("Failed to create deployment with search annotation.")
	}
	defer func() {
		_ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
		_ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
	}()
	// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})

@@ -2046,7 +2250,7 @@ func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testi
	logrus.Infof("Verifying deployment did not update")
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if updated {
		t.Errorf("Deployment which had to be exluded was updated")
		t.Errorf("Deployment which had to be excluded was updated")
	}
}

@@ -2102,6 +2306,7 @@ func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *te
		t.Errorf("Deployment which had to be excluded was updated")
	}
}

func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2143,6 +2348,18 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
	collectors := getCollectors()

	itemCalled := 0
	itemsCalled := 0

	daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDaemonSetItem(client, namespace, name)
	}
	daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDaemonSetItems(client, namespace)
	}

	err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
@@ -2163,9 +2380,68 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) {
		t.Errorf("Counter by namespace was not increased")
	}

	assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
	assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")

	testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()

	itemCalled := 0
	itemsCalled := 0

	daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetDaemonSetItem(client, namespace, name)
	}
	daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetDaemonSetItems(client, namespace)
	}

	assert.True(t, daemonSetFuncs.SupportsPatch)
	assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate)

	patchCalled := 0
	daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
		assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`)
		return nil
	}

	daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()

	err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
	}

	assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
	assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
	assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")

	daemonSetFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2303,6 +2579,18 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()

	itemCalled := 0
	itemsCalled := 0

	statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetStatefulSetItem(client, namespace, name)
	}
	statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetStatefulSetItems(client, namespace)
	}

	err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
@@ -2323,9 +2611,68 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) {
		t.Errorf("Counter by namespace was not increased")
	}

	assert.Equal(t, 0, itemCalled, "ItemFunc should not be called")
	assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice")

	testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()

	itemCalled := 0
	itemsCalled := 0

	statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) {
		itemCalled++
		return callbacks.GetStatefulSetItem(client, namespace, name)
	}
	statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object {
		itemsCalled++
		return callbacks.GetStatefulSetItems(client, namespace)
	}

	assert.True(t, statefulSetFuncs.SupportsPatch)
	assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate)

	patchCalled := 0
	statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`)
		assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`)
		return nil
	}

	statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()

	err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
	}

	assert.Equal(t, 1, itemCalled, "ItemFunc should be called once")
	assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once")
	assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice")

	statefulSetFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2488,6 +2835,9 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
	deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
		return fmt.Errorf("error")
	}
	deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
		return fmt.Errorf("error")
	}
	collectors := getCollectors()

	_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
@@ -2501,7 +2851,66 @@ func TestFailedRollingUpgradeUsingArs(t *testing.T) {
	}
}

func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS")
	}

	// Ensure deployment is NOT updated
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if updated {
		t.Errorf("Deployment was updated but should not have been")
	}

	// Ensure counters remain zero
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
		t.Errorf("Reload counter should not have increased")
	}
	if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 {
		t.Errorf("Reload counter by namespace should not have increased")
	}
}

func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"}
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS")
	}

	// Ensure deployment is NOT updated
	updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs)
	if updated {
		t.Errorf("Deployment was updated but should not have been (ERS)")
	}

	// Ensure counters remain zero
	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 {
		t.Errorf("Reload counter should not have increased (ERS)")
	}
	if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 {
		t.Errorf("Reload counter by namespace should not have increased (ERS)")
	}
}

func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
@@ -2518,6 +2927,24 @@ func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Client
	}
}
func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) {
	assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate)

	err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy)
	upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		assert.Equal(t, patchtypes.JSONPatchType, patchType)
		assert.NotEmpty(t, bytes)
		return nil
	}
	upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}
	if err != nil {
		t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix)
	}
}

func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2550,6 +2977,48 @@ func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) {
	testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com")
	config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	assert.True(t, deploymentFuncs.SupportsPatch)
	assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate)

	patchCalled := 0
	deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
		assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`)
		return nil
	}

	deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()

	err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
	if err != nil {
		t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix)
	}

	assert.Equal(t, 2, patchCalled)

	deploymentFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -2658,7 +3127,7 @@ func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsi
		t.Errorf("Failed to create deployment with search annotation.")
	}
	defer func() {
		_ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
		_ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
	}()
	// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})

@@ -3212,6 +3681,49 @@ func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) {
	testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com")
	config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()

	assert.True(t, daemonSetFuncs.SupportsPatch)
	assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate)

	patchCalled := 0
	daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
		assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`)
		return nil
	}

	daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()

	err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
	}

	assert.Equal(t, 2, patchCalled)

	daemonSetFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix
@@ -3372,6 +3884,49 @@ func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) {
	testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}

func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
	envVarPostfix := constants.ConfigmapEnvVarPostfix

	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com")
	config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()

	assert.True(t, statefulSetFuncs.SupportsPatch)
	assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate)

	patchCalled := 0
	statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error {
		patchCalled++
		if patchCalled < 2 {
			return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict
		}
		assert.Equal(t, patchtypes.StrategicMergePatchType, patchType)
		assert.NotEmpty(t, bytes)
		assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`)
		assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`)
		return nil
	}

	statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error {
		t.Errorf("Update should not be called")
		return nil
	}

	collectors := getCollectors()

	err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
	}

	assert.Equal(t, 2, patchCalled)

	statefulSetFuncs = GetDeploymentRollingUpgradeFuncs()
	testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix)
}
func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) {
|
||||
options.ReloadStrategy = constants.EnvVarsReloadStrategy
|
||||
envVarPostfix := constants.ConfigmapEnvVarPostfix
|
||||
@@ -3536,6 +4091,9 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
|
||||
deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error {
|
||||
return fmt.Errorf("error")
|
||||
}
|
||||
deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error {
|
||||
return fmt.Errorf("error")
|
||||
}
|
||||
collectors := getCollectors()
|
||||
|
||||
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
|
||||
@@ -3548,3 +4106,183 @@ func TestFailedRollingUpgradeUsingErs(t *testing.T) {
|
||||
t.Errorf("Counter by namespace was not increased")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPausingDeploymentUsingErs(t *testing.T) {
|
||||
options.ReloadStrategy = constants.EnvVarsReloadStrategy
|
||||
testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace)
|
||||
}
|
||||
|
||||
func TestPausingDeploymentUsingArs(t *testing.T) {
|
||||
options.ReloadStrategy = constants.AnnotationsReloadStrategy
|
||||
testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace)
|
||||
}
|
||||
|
||||
func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) {
|
||||
options.ReloadStrategy = reloadStrategy
|
||||
envVarPostfix := constants.ConfigmapEnvVarPostfix
|
||||
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause.stakater.com")
|
||||
config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
|
||||
deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
|
||||
collectors := getCollectors()
|
||||
|
||||
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
|
||||
|
||||
// Wait for deployment to have paused-at annotation
|
||||
logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName)
|
||||
err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to wait for deployment paused-at annotation: %v", err)
|
||||
}
|
||||
|
||||
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
|
||||
t.Errorf("Counter was not increased")
|
||||
}
|
||||
|
||||
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 {
|
||||
t.Errorf("Counter by namespace was not increased")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment has been paused")
|
||||
items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
|
||||
deploymentPaused, err := isDeploymentPaused(items, testName)
|
||||
if err != nil {
|
||||
t.Errorf("%s", err.Error())
|
||||
}
|
||||
if !deploymentPaused {
|
||||
t.Errorf("Deployment has not been paused")
|
||||
}
|
||||
|
||||
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com")
|
||||
config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation)
|
||||
|
||||
_ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy)
|
||||
|
||||
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 {
|
||||
t.Errorf("Counter was not increased")
|
||||
}
|
||||
|
||||
if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 {
|
||||
t.Errorf("Counter by namespace was not increased")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment is still paused")
|
||||
items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
|
||||
deploymentPaused, err = isDeploymentPaused(items, testName)
|
||||
if err != nil {
|
||||
t.Errorf("%s", err.Error())
|
||||
}
|
||||
if !deploymentPaused {
|
||||
t.Errorf("Deployment should still be paused")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment has been resumed after pause interval")
|
||||
time.Sleep(11 * time.Second)
|
||||
items = deploymentFuncs.ItemsFunc(clients, config.Namespace)
|
||||
deploymentPaused, err = isDeploymentPaused(items, testName)
|
||||
if err != nil {
|
||||
t.Errorf("%s", err.Error())
|
||||
}
|
||||
if deploymentPaused {
|
||||
t.Errorf("Deployment should have been resumed after pause interval")
|
||||
}
|
||||
}
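For context on what this test drives, here is a hedged sketch (inferred from the annotation names used above, not copied from this diff) of a Deployment that opts into the pause behaviour; expressing the manifest in Go keeps it consistent with the surrounding tests. The "10s" value is an assumption that lines up with the 11-second sleep in testPausingDeployment; any positive time.ParseDuration-compatible value should work:

// Hypothetical workload annotated for auto-reload with a 10s pause period.
deployment := &appsv1.Deployment{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "my-app",
		Namespace: "default",
		Annotations: map[string]string{
			"reloader.stakater.com/auto":                    "true",
			"deployment.reloader.stakater.com/pause-period": "10s",
		},
	},
}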

func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) {
	deployment, err := FindDeploymentByName(deployments, deploymentName)
	if err != nil {
		return false, err
	}
	return IsPaused(deployment), nil
}

// waitForDeploymentPausedAtAnnotation waits for a deployment to carry the paused-at annotation
func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error {
	start := time.Now()

	for time.Since(start) < timeout {
		items := deploymentFuncs.ItemsFunc(clients, namespace)
		deployment, err := FindDeploymentByName(items, deploymentName)
		if err == nil {
			annotations := deployment.GetAnnotations()
			if annotations != nil {
				if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists {
					return nil
				}
			}
		}

		time.Sleep(100 * time.Millisecond)
	}

	return fmt.Errorf("timeout waiting for deployment %s to have paused-at annotation", deploymentName)
}

// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers
// This simulates the scenario where Argo Rollouts with workloadRef return empty containers
func MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object {
	rollout := &argorolloutv1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: argorolloutv1alpha1.RolloutSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					Containers:     []v1.Container{}, // Empty containers slice
					InitContainers: []v1.Container{}, // Empty init containers slice
					Volumes:        []v1.Volume{},    // Empty volumes slice
				},
			},
		},
	}
	var obj runtime.Object = rollout
	return &obj
}

// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions
func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) {
	namespace := "test-namespace"
	resourceName := "test-configmap"

	// Use real Argo Rollout functions but mock the containers function
	rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs()
	originalContainersFunc := rolloutFuncs.ContainersFunc
	originalInitContainersFunc := rolloutFuncs.InitContainersFunc

	// Override to return empty containers (simulating workloadRef scenario)
	rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container {
		return []v1.Container{} // Empty like workloadRef rollouts
	}
	rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container {
		return []v1.Container{} // Empty like workloadRef rollouts
	}

	// Restore original functions after test
	defer func() {
		rolloutFuncs.ContainersFunc = originalContainersFunc
		rolloutFuncs.InitContainersFunc = originalInitContainersFunc
	}()

	// Use proper Argo Rollout object instead of Pod
	mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout")

	config := common.Config{
		Namespace:    namespace,
		ResourceName: resourceName,
		Type:         constants.ConfigmapEnvVarPostfix,
		SHAValue:     "test-sha",
	}

	// Test both autoReload scenarios using subtests as suggested by Felix
	for _, autoReload := range []bool{true, false} {
		t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) {
			// This tests the actual fix in the context of Argo Rollouts
			result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload)

			if result != nil {
				t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result)
			}
		})
	}
}

@@ -16,7 +16,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
)

@@ -159,7 +159,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying pod envvars has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -186,7 +186,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
	// Verifying that the deployment was not updated as leadership has been lost
	logrus.Infof("Verifying pod envvars has not been updated")
	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
	config = util.Config{
	config = common.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,

@@ -22,6 +22,8 @@ var (
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
	// IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps
	IgnoreResourceAnnotation = "reloader.stakater.com/ignore"
	// ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps
	ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto"
	// SecretReloaderAutoAnnotation is an annotation to detect changes in secrets
@@ -38,6 +40,12 @@ var (
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// RolloutStrategyAnnotation is an annotation to define rollout update strategy
	RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
	// PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after
	// a configmap/secret change has been detected. Valid values are described here: https://pkg.go.dev/time#ParseDuration
	// Only positive values are allowed.
	PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period"
	// PauseDeploymentTimeAnnotation is set by Reloader to indicate that the deployment has been paused
	PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
	// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -55,6 +63,19 @@ var (
	EnableHA = false
	// WebhookUrl is a URL to send a request to instead of triggering a reload
	WebhookUrl = ""
	// ResourcesToIgnore is a list of resources to ignore when watching for changes
	ResourcesToIgnore = []string{}
	// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
	NamespacesToIgnore = []string{}
	// NamespaceSelectors is a list of namespace selectors to watch for changes
	NamespaceSelectors = []string{}
	// ResourceSelectors is a list of resource selectors to watch for changes
	ResourceSelectors = []string{}
	// EnablePProf enables pprof for profiling
	EnablePProf = false
	// PProfAddr is the address to start the pprof server on
	// Default is :6060
	PProfAddr = ":6060"
)

func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {

@@ -21,6 +21,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
@@ -733,7 +734,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
		return ""
	}

	var last util.ReloadSource
	var last common.ReloadSource
	bytes := []byte(annotationJson)
	err := json.Unmarshal(bytes, &last)
	if err != nil {
@@ -794,6 +795,26 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
	return deployment, err
}

// CreateDeploymentWithAnnotations creates a deployment with additional annotations in given namespace and returns the Deployment
func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	var deploymentObj *appsv1.Deployment
	if volumeMount {
		deploymentObj = GetDeployment(namespace, deploymentName)
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}

	for annotationKey, annotationValue := range additionalAnnotations {
		deploymentObj.Annotations[annotationKey] = annotationValue
	}

	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}

// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
	logrus.Infof("Creating DeploymentConfig")
@@ -968,6 +989,22 @@ func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulse
	return statefulsetError
}

// DeleteCronJob deletes a cronJob in given namespace and returns the error if any
func DeleteCronJob(client kubernetes.Interface, namespace string, cronJobName string) error {
	logrus.Infof("Deleting CronJob %s", cronJobName)
	cronJobError := client.BatchV1().CronJobs(namespace).Delete(context.TODO(), cronJobName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return cronJobError
}

// DeleteJob deletes a job in given namespace and returns the error if any
func DeleteJob(client kubernetes.Interface, namespace string, jobName string) error {
	logrus.Infof("Deleting Job %s", jobName)
	jobError := client.BatchV1().Jobs(namespace).Delete(context.TODO(), jobName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return jobError
}

// UpdateConfigMap updates a configmap in given namespace and returns the error if any
func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace string, configmapName string, label string, data string) error {
	logrus.Infof("Updating configmap %q.\n", configmapName)
@@ -1022,7 +1059,7 @@ func RandSeq(n int) string {
}

// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1068,7 +1105,7 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
}

// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened and all env vars STAKATER_name_CONFIGMAP/SECRET are removed
func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1117,7 +1154,7 @@ func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVa
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)

@@ -3,10 +3,15 @@ package util
import (
	"bytes"
	"encoding/base64"
	"errors"
	"fmt"
	"sort"
	"strings"

	"github.com/spf13/cobra"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/options"
	v1 "k8s.io/api/core/v1"
)

@@ -54,8 +59,6 @@ func GetSHAfromSecret(data map[string][]byte) string {

type List []string

type Map map[string]string

func (l *List) Contains(s string) bool {
	for _, v := range *l {
		if v == s {
@@ -64,3 +67,48 @@ func (l *List) Contains(s string) bool {
	}
	return false
}

func ConfigureReloaderFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
	cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
	cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
	cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
	cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
	cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
	cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
	cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
	cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected")
	cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to indicate when a deployment was paused by Reloader")
	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
	cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
	cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
	cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
	cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
	cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
	cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
	cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
	cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling")
	cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
}
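A minimal sketch of how these flags attach to a command (assumed wiring; the real entrypoint lives elsewhere in the repo and may differ):

package main

import (
	"github.com/spf13/cobra"
	"github.com/stakater/Reloader/internal/pkg/util"
)

func main() {
	// Hypothetical root command just to show where ConfigureReloaderFlags hooks in.
	rootCmd := &cobra.Command{Use: "reloader"}
	util.ConfigureReloaderFlags(rootCmd)
	if err := rootCmd.Execute(); err != nil {
		panic(err)
	}
}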

func GetIgnoredResourcesList() (List, error) {
	ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore")

	for _, v := range ignoredResourcesList {
		if v != "configMaps" && v != "secrets" {
			return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
		}
	}

	if len(ignoredResourcesList) > 1 {
		return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
	}

	return ignoredResourcesList, nil
}

pkg/common/common.go (new file, 328 lines)
@@ -0,0 +1,328 @@
package common

import (
	"context"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/options"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

type Map map[string]string

type ReloadCheckResult struct {
	ShouldReload bool
	AutoReload   bool
}

// ReloaderOptions contains all configurable options for the Reloader controller.
// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets.
type ReloaderOptions struct {
	// AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated
	AutoReloadAll bool `json:"autoReloadAll"`
	// ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name
	ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"`
	// SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name
	SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"`
	// ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets
	ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"`
	// IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched
	IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"`
	// ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only
	ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"`
	// SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only
	SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"`
	// ConfigmapExcludeReloaderAnnotation is the annotation key containing a comma-separated list of ConfigMaps to exclude from watching
	ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"`
	// SecretExcludeReloaderAnnotation is the annotation key containing a comma-separated list of Secrets to exclude from watching
	SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"`
	// AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with SearchMatchAnnotation
	AutoSearchAnnotation string `json:"autoSearchAnnotation"`
	// SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation
	SearchMatchAnnotation string `json:"searchMatchAnnotation"`
	// RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads
	RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"`
	// PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after
	// a ConfigMap/Secret change has been detected
	PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"`
	// PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader
	PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"`

	// LogFormat specifies the log format to use (json, or empty string for default text format)
	LogFormat string `json:"logFormat"`
	// LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic)
	LogLevel string `json:"logLevel"`
	// IsArgoRollouts indicates whether support for Argo Rollouts is enabled
	IsArgoRollouts bool `json:"isArgoRollouts"`
	// ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations)
	ReloadStrategy string `json:"reloadStrategy"`
	// ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created
	ReloadOnCreate bool `json:"reloadOnCreate"`
	// ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted
	ReloadOnDelete bool `json:"reloadOnDelete"`
	// SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true)
	SyncAfterRestart bool `json:"syncAfterRestart"`
	// EnableHA indicates whether High Availability mode is enabled with leader election
	EnableHA bool `json:"enableHA"`
	// WebhookUrl is the URL to send webhook notifications to instead of performing reloads
	WebhookUrl string `json:"webhookUrl"`
	// ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
	ResourcesToIgnore []string `json:"resourcesToIgnore"`
	// NamespaceSelectors is a list of label selectors to filter namespaces to watch
	NamespaceSelectors []string `json:"namespaceSelectors"`
	// ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
	ResourceSelectors []string `json:"resourceSelectors"`
	// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
	NamespacesToIgnore []string `json:"namespacesToIgnore"`
	// EnablePProf enables pprof for profiling
	EnablePProf bool `json:"enablePProf"`
	// PProfAddr is the address to start the pprof server on
	PProfAddr string `json:"pprofAddr"`
}

var CommandLineOptions *ReloaderOptions

func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
	namespace := os.Getenv("RELOADER_NAMESPACE")
	if namespace == "" {
		logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation")
		return
	}

	metaInfo := &MetaInfo{
		BuildInfo:       *NewBuildInfo(),
		ReloaderOptions: *GetCommandLineOptions(),
		DeploymentInfo: metav1.ObjectMeta{
			Name:      os.Getenv("RELOADER_DEPLOYMENT_NAME"),
			Namespace: namespace,
		},
	}

	configMap := metaInfo.ToConfigMap()

	if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil {
		logrus.Info("Meta info configmap already exists, updating it")
		_, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
		if err != nil {
			logrus.Warn("Failed to update existing meta info configmap: ", err)
		}
		return
	}

	_, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
	if err != nil {
		logrus.Warn("Failed to create meta info configmap: ", err)
	}
}
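A quick way to exercise PublishMetaInfoConfigmap outside a cluster is client-go's fake clientset. This is a sketch; the env var values are illustrative, and in a real deployment they would be injected via the downward API or the Helm chart:

package main

import (
	"os"

	"github.com/stakater/Reloader/pkg/common"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	os.Setenv("RELOADER_NAMESPACE", "reloader-system")
	os.Setenv("RELOADER_DEPLOYMENT_NAME", "reloader")

	clientset := fake.NewSimpleClientset()
	common.PublishMetaInfoConfigmap(clientset) // creates the reloader-meta-info ConfigMap in memory
}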

func GetNamespaceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	namespaceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(namespaceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return namespaceLabelSelector, nil
}
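To make the legacy conversions concrete, here is a short sketch (values invented for illustration; behaviour derived by reading the loop above) of the selector strings the function normalizes to:

// Input slice           -> selector passed to labels.Parse
// {"env:prod"}          -> "env=prod"  (legacy ":" delimiter)
// {"team:*"}            -> "team"      (wildcard becomes a bare existence check)
// {"app=*", "tier=web"} -> "app,tier=web"
sel, _ := GetNamespaceLabelSelector([]string{"env:prod", "team:*"})
// sel == "env=prod,team"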

func GetResourceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	resourceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(resourceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return resourceLabelSelector, nil
}

// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options.
func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {
	ignoreResourceAnnotationValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation]
	if ignoreResourceAnnotationValue == "true" {
		return ReloadCheckResult{
			ShouldReload: false,
		}
	}

	annotationValue, found := annotations[config.Annotation]
	searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
	reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
	typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
	excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
	excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]

	if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
		annotations = podAnnotations
		annotationValue = annotations[config.Annotation]
		searchAnnotationValue = annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
		typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
	}

	isResourceExcluded := false

	switch config.Type {
	case constants.ConfigmapEnvVarPostfix:
		if foundExcludeConfigmap {
			isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
		}
	case constants.SecretEnvVarPostfix:
		if foundExcludeSecret {
			isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
		}
	}

	if isResourceExcluded {
		return ReloadCheckResult{
			ShouldReload: false,
		}
	}

	reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
	typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
	if reloaderEnabled || typedAutoAnnotationEnabled || (reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll) {
		return ReloadCheckResult{
			ShouldReload: true,
			AutoReload:   true,
		}
	}

	values := strings.Split(annotationValue, ",")
	for _, value := range values {
		value = strings.TrimSpace(value)
		re := regexp.MustCompile("^" + value + "$")
		if re.Match([]byte(config.ResourceName)) {
			return ReloadCheckResult{
				ShouldReload: true,
				AutoReload:   false,
			}
		}
	}

	if searchAnnotationValue == "true" {
		matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
		if matchAnnotationValue == "true" {
			return ReloadCheckResult{
				ShouldReload: true,
				AutoReload:   true,
			}
		}
	}

	return ReloadCheckResult{
		ShouldReload: false,
	}
}
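A hedged usage sketch (values invented for illustration; it assumes the defaults populated by GetCommandLineOptions) showing the explicit name-match path, where a workload names the ConfigMaps it cares about via the reload annotation:

package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/pkg/common"
)

func main() {
	opts := common.GetCommandLineOptions()

	cfg := common.Config{
		Namespace:           "default",
		ResourceName:        "app-config",
		Annotation:          opts.ConfigmapUpdateOnChangeAnnotation,
		TypedAutoAnnotation: opts.ConfigmapReloaderAutoAnnotation,
		Type:                constants.ConfigmapEnvVarPostfix,
	}

	// Workload annotated to reload on changes to "app-config" (names can also be regexes).
	workloadAnnotations := common.Map{
		opts.ConfigmapUpdateOnChangeAnnotation: "app-config,other-.*",
	}

	res := common.ShouldReload(cfg, cfg.Type, workloadAnnotations, nil, opts)
	fmt.Println(res.ShouldReload, res.AutoReload) // true false: explicit match, not auto-reload
}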

func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
	if excludedResources == "" {
		return false
	}

	excludedResourcesList := strings.Split(excludedResources, ",")
	for _, excludedResource := range excludedResourcesList {
		if strings.TrimSpace(excludedResource) == resourceName {
			return true
		}
	}

	return false
}
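For example (illustrative values), the exclude annotation takes a comma-separated list and tolerates surrounding whitespace:

// checkIfResourceIsExcluded("db-config", "app-config, db-config") -> true
// checkIfResourceIsExcluded("db-config", "app-config,db-conf")    -> false (exact match only)
// checkIfResourceIsExcluded("db-config", "")                      -> false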

func init() {
	GetCommandLineOptions()
}

func GetCommandLineOptions() *ReloaderOptions {
	if CommandLineOptions == nil {
		CommandLineOptions = &ReloaderOptions{}
	}

	CommandLineOptions.AutoReloadAll = options.AutoReloadAll
	CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation
	CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation
	CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation
	CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation
	CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation
	CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation
	CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation
	CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation
	CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation
	CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation
	CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation
	CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation
	CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation
	CommandLineOptions.LogFormat = options.LogFormat
	CommandLineOptions.LogLevel = options.LogLevel
	CommandLineOptions.ReloadStrategy = options.ReloadStrategy
	CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart
	CommandLineOptions.EnableHA = options.EnableHA
	CommandLineOptions.WebhookUrl = options.WebhookUrl
	CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
	CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
	CommandLineOptions.ResourceSelectors = options.ResourceSelectors
	CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
	CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts)
	CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
	CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
	CommandLineOptions.EnablePProf = options.EnablePProf
	CommandLineOptions.PProfAddr = options.PProfAddr

	return CommandLineOptions
}

func parseBool(value string) bool {
	if value == "" {
		return false
	}
	result, err := strconv.ParseBool(value)
	if err != nil {
		return false // Default to false if parsing fails
	}
	return result
}

@@ -1,12 +1,13 @@
package util
package common

import (
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
)

//Config contains rolling upgrade configuration parameters
// Config contains rolling upgrade configuration parameters
type Config struct {
	Namespace    string
	ResourceName string
@@ -15,6 +16,7 @@ type Config struct {
	TypedAutoAnnotation string
	SHAValue            string
	Type                string
	Labels              map[string]string
}

// GetConfigmapConfig provides utility config for configmap
@@ -25,8 +27,9 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
		ResourceAnnotations: configmap.Annotations,
		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
		TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
		SHAValue:            GetSHAfromConfigmap(configmap),
		SHAValue:            util.GetSHAfromConfigmap(configmap),
		Type:                constants.ConfigmapEnvVarPostfix,
		Labels:              configmap.Labels,
	}
}

@@ -38,7 +41,8 @@ func GetSecretConfig(secret *v1.Secret) Config {
		ResourceAnnotations: secret.Annotations,
		Annotation:          options.SecretUpdateOnChangeAnnotation,
		TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
		SHAValue:            GetSHAfromSecret(secret.Data),
		SHAValue:            util.GetSHAfromSecret(secret.Data),
		Type:                constants.SecretEnvVarPostfix,
		Labels:              secret.Labels,
	}
}

pkg/common/metainfo.go (new file, 129 lines)
@@ -0,0 +1,129 @@
package common

import (
	"encoding/json"
	"fmt"
	"runtime"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Version, Commit, and BuildDate are set during the build process
// using the -X linker flag to inject these values into the binary.
// They provide metadata about the build version, commit hash, and build date.
// This information is useful for debugging and tracking the specific build of the Reloader binary.
var Version = "dev"
var Commit = "unknown"
var BuildDate = "unknown"

const (
	MetaInfoConfigmapName       = "reloader-meta-info"
	MetaInfoConfigmapLabelKey   = "reloader.stakater.com/meta-info"
	MetaInfoConfigmapLabelValue = "reloader-oss"
)

// MetaInfo contains comprehensive metadata about the Reloader instance.
// This includes build information, configuration options, and deployment details.
type MetaInfo struct {
	// BuildInfo contains information about the build version, commit, and compilation details
	BuildInfo BuildInfo `json:"buildInfo"`
	// ReloaderOptions contains all the configuration options and flags used by this Reloader instance
	ReloaderOptions ReloaderOptions `json:"reloaderOptions"`
	// DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance
	DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"`
}

// BuildInfo contains information about the build and version of the Reloader binary.
// This includes Go version, release version, commit details, and build timestamp.
type BuildInfo struct {
	// GoVersion is the version of Go used to compile the binary
	GoVersion string `json:"goVersion"`
	// ReleaseVersion is the version tag or branch of the Reloader release
	ReleaseVersion string `json:"releaseVersion"`
	// CommitHash is the Git commit hash of the source code used to build this binary
	CommitHash string `json:"commitHash"`
	// CommitTime is the timestamp of the Git commit used to build this binary
	CommitTime time.Time `json:"commitTime"`
}

func NewBuildInfo() *BuildInfo {
	metaInfo := &BuildInfo{
		GoVersion:      runtime.Version(),
		ReleaseVersion: Version,
		CommitHash:     Commit,
		CommitTime:     ParseUTCTime(BuildDate),
	}

	return metaInfo
}

func (m *MetaInfo) ToConfigMap() *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      MetaInfoConfigmapName,
			Namespace: m.DeploymentInfo.Namespace,
			Labels: map[string]string{
				MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue,
			},
		},
		Data: map[string]string{
			"buildInfo":       toJson(m.BuildInfo),
			"reloaderOptions": toJson(m.ReloaderOptions),
			"deploymentInfo":  toJson(m.DeploymentInfo),
		},
	}
}

func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) {
	var buildInfo BuildInfo
	if val, ok := configmap.Data["buildInfo"]; ok {
		err := json.Unmarshal([]byte(val), &buildInfo)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err)
		}
	}

	var reloaderOptions ReloaderOptions
	if val, ok := configmap.Data["reloaderOptions"]; ok {
		err := json.Unmarshal([]byte(val), &reloaderOptions)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err)
		}
	}

	var deploymentInfo metav1.ObjectMeta
	if val, ok := configmap.Data["deploymentInfo"]; ok {
		err := json.Unmarshal([]byte(val), &deploymentInfo)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err)
		}
	}

	return &MetaInfo{
		BuildInfo:       buildInfo,
		ReloaderOptions: reloaderOptions,
		DeploymentInfo:  deploymentInfo,
	}, nil
}

func toJson(data interface{}) string {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return ""
	}
	return string(jsonData)
}

func ParseUTCTime(value string) time.Time {
	if value == "" {
		return time.Time{} // Return zero time if value is empty
	}
	t, err := time.Parse(time.RFC3339, value)
	if err != nil {
		return time.Time{} // Return zero time if parsing fails
	}
	return t
}
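For reference, a hedged sketch of the build-time wiring that populates these variables (the exact invocation is an assumption; the package path follows from this file living in pkg/common of the github.com/stakater/Reloader module):

// Assumed Makefile/CI invocation, not shown in this diff:
//
//	go build -ldflags "\
//	  -X github.com/stakater/Reloader/pkg/common.Version=v1.2.3 \
//	  -X github.com/stakater/Reloader/pkg/common.Commit=abc1234 \
//	  -X github.com/stakater/Reloader/pkg/common.BuildDate=2024-01-01T00:00:00Z"
//
// BuildDate must be RFC 3339 so that ParseUTCTime can parse it; otherwise
// CommitTime silently falls back to the zero time.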

@@ -1,4 +1,4 @@
package util
package common

import "time"