Compare commits

..

4 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Muneeb Aijaz | 4517f71ca7 | Merge pull request #819 from stakater/fix-ubi: Fix References of UBI image directories that no longer exist | 2025-01-02 17:07:04 +05:00 |
| Muneeb Aijaz | 13ebaa4f92 | remove reference of old paths | 2025-01-02 16:36:00 +05:00 |
| Muneeb Aijaz | a00261acee | Merge pull request #818 from stakater/update-version-tprlgh2: Bump version to 1.2.1 on release-v1.2.1 branch | 2025-01-02 15:27:54 +05:00 |
| MuneebAijaz | ad35516413 | Bump version to 1.2.1 (`Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>`) | 2025-01-02 10:25:53 +00:00 |
32 changed files with 101 additions and 927 deletions

View File

@@ -57,7 +57,7 @@ jobs:
git diff
- name: Create pull request
uses: peter-evans/create-pull-request@v7.0.6
uses: peter-evans/create-pull-request@v7.0.5
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

View File

@@ -16,7 +16,7 @@ env:
jobs:
helm-chart-validation:
helm-validation:
permissions:
contents: read
@@ -42,24 +42,6 @@ jobs:
with:
charts: deployments/kubernetes/chart/reloader
helm-version-validation:
needs: helm-chart-validation
permissions:
contents: read
runs-on: ubuntu-latest
name: Helm Version Validation
if: ${{ contains(github.event.pull_request.labels.*.name, 'release/helm-chart') }}
steps:
- name: Check out code
uses: actions/checkout@v4
with:
ref: ${{github.event.pull_request.head.sha}}
fetch-depth: 0
- name: Add Stakater Helm Repo
run: |
helm repo add stakater https://stakater.github.io/stakater-charts

View File

@@ -25,7 +25,7 @@ env:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.122
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.106
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: README.md

View File

@@ -15,17 +15,8 @@ on:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.122
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.106
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: docs
MD_LINT_CONFIG: .markdownlint.yaml
build:
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.122
with:
DOCKER_FILE_PATH: Dockerfile-docs
CONTAINER_REGISTRY_URL: ghcr.io/stakater
secrets:
CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

View File

@@ -15,14 +15,14 @@ env:
REGISTRY: ghcr.io
jobs:
verify-and-push-helm-chart:
build:
permissions:
contents: read
packages: write # to push artifacts to `ghcr.io`
name: Verify and Push Helm Chart
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'release/helm-chart')) }}
name: Build
if: github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:

View File

@@ -5,15 +5,6 @@ on:
branches:
- master
types: [ labeled ]
paths:
- '!.markdownlint.yaml'
- '!.vale.ini'
- '!Dockerfile-docs'
- '!docs-nginx.conf'
- '!docs/**'
- '!theme_common'
- '!theme_override'
- '!deployments/kubernetes/chart/reloader/**'
env:
DOCKER_FILE_PATH: Dockerfile

View File

@@ -200,6 +200,7 @@ jobs:
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
labels: |

View File

@@ -1,7 +1,7 @@
StylesPath = styles
MinAlertLevel = warning
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.53/Stakater.zip
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.44/Stakater.zip
Vocab = Stakater
# Only check MarkDown files

View File

@@ -1,4 +1,4 @@
FROM python:3.13-alpine as builder
FROM python:3.12 as builder
# set workdir
RUN mkdir -p $HOME/application
@@ -10,7 +10,7 @@ COPY --chown=1001:root . .
RUN pip3 install -r theme_common/requirements.txt
# Combine Theme Resources
RUN python theme_common/scripts/combine_theme_resources.py -s theme_common/resources -ov theme_override/resources -o dist/_theme
RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
# Produce mkdocs file
RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml

View File

@@ -50,13 +50,13 @@ spec:
metadata:
```
This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
You can filter it by the type of monitored resource and use typed versions of the `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add the `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use the `configmap.reloader.stakater.com/auto` annotation to look for changes in mounted `ConfigMap`s; changes in any mounted `Secret`s will not trigger a rolling upgrade on related pods.
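For illustration, a minimal sketch of the typed variant (placeholder names, rest of the Deployment spec omitted):

```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: foo-app  # placeholder name, not from this repository
  annotations:
    # only changes in mounted Secrets trigger a rolling upgrade;
    # ConfigMap changes are ignored for this workload
    secret.reloader.stakater.com/auto: "true"
# remaining Deployment spec (selector, template, containers) omitted
```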
You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs like this:
your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:
```yaml
kind: Deployment
@@ -91,11 +91,11 @@ not.
Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"` or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` will restart upon a change in a secret it uses.
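A hedged illustration of that interplay (placeholder names, rest of the spec omitted):

```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: bar-app  # placeholder name
  annotations:
    # only one of the two needs to be "true": a change in a mounted
    # Secret will still restart this workload
    reloader.stakater.com/auto: "false"
    secret.reloader.stakater.com/auto: "true"
# remaining Deployment spec omitted
```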
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret; this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset`, `rollout`, `cronJob` or `job`.
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret; this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset` or `rollout`.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](.#Configmap) or [Secret](.#Secret).
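A minimal sketch of this targeted form, reusing the placeholder `foo-configmap`/`foo-secret` names from above:

```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: foo-app  # placeholder name
  annotations:
    # reload only when these specific resources change, not on changes
    # to every ConfigMap or Secret the pods happen to use
    configmap.reloader.stakater.com/reload: "foo-configmap"
    secret.reloader.stakater.com/reload: "foo-secret"
# remaining Deployment spec omitted
```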
It's also possible to enable auto reloading for all resources, by setting the `--auto-reload-all` flag.
In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their Configmaps or Secrets are updated.
In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their ConfigMaps or Secrets are updated.
Notice that setting the auto annotation to an undefined value counts as false as well.
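A sketch of one way to enable this, assuming the flag is passed to the Reloader binary as a container argument (a Helm-based install may expose it differently):

```yaml
# excerpt of a Reloader Deployment manifest, for illustration only
spec:
  template:
    spec:
      containers:
        - name: reloader-reloader
          image: "ghcr.io/stakater/reloader:v1.2.1"
          args:
            - "--auto-reload-all"
```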
### Configmap
@@ -158,7 +158,7 @@ spec:
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
- For [`rollouts`](https://github.com/argoproj/argo-rollouts/) Reloader simply triggers a change is up to you how you configure the `rollout` strategy.
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets/CronJobs/Jobs`
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the secret typed auto annotation with the `--secret-auto-annotation` flag
@@ -173,7 +173,6 @@ spec:
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
- you can configure rollout reload strategy with `reloader.stakater.com/rollout-strategy` annotation, `restart` or `rollout` values are available (defaults to `rollout`)
## Reload Strategies
@@ -210,8 +209,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
| Argument | Description |
|----------------------------------|----------------------|
| `--resources-to-ignore=configMaps` | To ignore configmaps |
| `--resources-to-ignore=secrets` | To ignore secrets |
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |
**Note:** Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the Reloader pods down to `0`.
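For Helm-based installs, the equivalent switch is exposed as a chart value; a sketch using the `reloader.ignoreConfigMaps`/`reloader.ignoreSecrets` parameters listed below:

```yaml
# values.yaml excerpt, for illustration; ignoreSecrets and ignoreConfigMaps
# cannot both be enabled at the same time
reloader:
  ignoreConfigMaps: true
  ignoreSecrets: false
```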
@@ -329,7 +328,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.isArgoRollouts` | Enable Argo `Rollouts`. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.ignoreSecrets` | To ignore secrets. Valid values are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configMaps. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnCreate` | Enable reload on create events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid values are either `true` or `false` | boolean | `false` |
@@ -347,25 +346,25 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
#### Deployment Reloader Parameters
| Parameter | Description | Type | Default |
|-------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-------------------|
| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true`. The replicas will be limited to 1 when `reloader.enableHA = false` | int | 1 |
| `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
| `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
| `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
| `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
| `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
| `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
| `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
| `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
| Parameter | Description | Type | Default |
|-------------------------------------------------|-----------------------------------------------------------------------------------------|--------|-------------------|
| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true` | int | 1 |
| `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
| `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
| `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
| `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
| `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
| `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
| `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
| `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
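As a sketch only, a hypothetical `values.yaml` excerpt exercising a few of the parameters above (all concrete values are illustrative, not recommendations):

```yaml
reloader:
  enableHA: false               # with HA disabled, replicas are capped at 1
  deployment:
    replicas: 1
    revisionHistoryLimit: 2
    nodeSelector:
      kubernetes.io/os: linux   # illustrative label
    tolerations: []
    annotations: {}
    resources:
      requests:
        cpu: 10m
        memory: 64Mi
      limits:
        memory: 128Mi
    priorityClassName: ""
```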
#### Other Reloader Parameters
@@ -385,7 +384,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
#### Additional Remarks
- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are, then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configmap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are, then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configMap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
- Only one of the resources `ignoreConfigMaps` or `ignoreSecrets` can be ignored at a time; trying to do both will cause an error in Helm template compilation
- Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions
- `isOpenShift` Recent versions of OpenShift (tested on 4.13.3) require the specified user to be in an `uid` range which is dynamically assigned by the namespace. The solution is to unset the runAsUser variable via ``deployment.securityContext.runAsUser=null`` and let OpenShift assign it at install
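For the OpenShift remark above, a hedged `values.yaml` override could look like this (sketch only):

```yaml
reloader:
  isOpenshift: true
  deployment:
    securityContext:
      runAsUser: null   # let OpenShift assign a UID from the namespace range
```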
@@ -463,8 +462,6 @@ _Helm chart versioning_: The Reloader Helm chart is maintained in [this reposito
Helm chart will be released to the chart registry whenever files in `deployments/kubernetes/chart/reloader/**` change on the main branch.
The Helm chart will be released by the maintainers by labelling a PR with `release/helm-chart` and updating the `version` field in the `Chart.yaml` file ahead of time.
## Changelog
View the [releases page](https://github.com/stakater/Reloader/releases) to see what has changed in each release.

View File

@@ -1 +1 @@
1.1.0
1.2.1

View File

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 1.2.2
appVersion: v1.2.1
version: 1.2.1
appVersion: v1.2.0
keywords:
- Reloader
- kubernetes

View File

@@ -89,9 +89,6 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"

View File

@@ -18,7 +18,7 @@ metadata:
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
{{- if not (.Values.reloader.enableHA) }}
replicas: {{ min .Values.reloader.deployment.replicas 1 }}
replicas: 1
{{- else }}
replicas: {{ .Values.reloader.deployment.replicas }}
{{- end}}
@@ -75,13 +75,9 @@ spec:
containers:
{{- if .Values.global.imageRegistry }}
- image: "{{ .Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
{{- else }}
{{- if .Values.reloader.deployment.image.digest }}
- image: "{{ .Values.reloader.deployment.image.name }}@{{ .Values.reloader.deployment.image.digest }}"
{{- else }}
{{- else }}
- image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
{{- end }}
{{- end }}
{{- end }}
imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
name: {{ template "reloader-fullname" . }}
env:

View File

@@ -5,12 +5,7 @@ metadata:
name: {{ template "reloader-fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
{{- if .Values.reloader.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.reloader.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- if and .Values.reloader.podDisruptionBudget.minAvailable (not .Values.reloader.podDisruptionBudget.maxUnavailable)}}
minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
{{- end }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}

View File

@@ -67,6 +67,16 @@ rules:
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- "batch"
resources:
@@ -80,9 +90,6 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"

View File

@@ -96,12 +96,11 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v1.2.1
version: v1.2.0
image:
name: ghcr.io/stakater/reloader
base: stakater/reloader
tag: v1.2.1
# digest: sha256:1234567
tag: v1.2.0
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -289,9 +288,6 @@ reloader:
enabled: false
# Set the minimum available replicas
# minAvailable: 1
# OR Set the maximum unavailable replicas
# maxUnavailable: 1
# If both defined only maxUnavailable will be used
netpol:
enabled: false

View File

@@ -48,9 +48,6 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:

View File

@@ -17,7 +17,7 @@ spec:
app: reloader-reloader
spec:
containers:
- image: "ghcr.io/stakater/reloader:v1.1.0"
- image: "ghcr.io/stakater/reloader:v1.2.1"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:

View File

@@ -52,9 +52,6 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:
@@ -104,7 +101,7 @@ spec:
resourceFieldRef:
divisor: "1"
resource: limits.memory
image: "ghcr.io/stakater/reloader:latest"
image: ghcr.io/stakater/reloader:v1.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

View File

@@ -54,7 +54,7 @@ When Reloader detects changes in configmap. It gets two objects of configmap. Fi
After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulsets` and looks for the above-mentioned annotation for the configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.
### Environment Variable for Configmap
### Environment Variable for ConfigMap
If configmap name is foo then

View File

@@ -1,6 +1,7 @@
# Reloader vs ConfigmapController
Reloader is inspired by [`Configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapController`. Below is a small comparison between these two controllers.
Reloader is inspired by [Configmapcontroller](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from configmapController. Below is a small comparison between these two controllers.
| Reloader | Configmap |
|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|

View File

@@ -3,12 +3,12 @@
Below are the steps to use Reloader with Sealed Secrets:
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets)
1. Install the controller for Sealed Secrets
1. Install the controller for sealed secrets
1. Fetch the encryption certificate
1. Encrypt the secret
1. Apply the secret
1. Install the tool which uses that Sealed Secret
1. Install the tool which uses that sealed secret
1. Install Reloader
1. Once everything is set up, update the original secret on the client and encrypt it with kubeseal to see Reloader working
1. Apply the updated Sealed Secret
1. Apply the updated sealed secret
1. Reloader will restart the pod to use that updated secret

go.mod
View File

@@ -10,7 +10,6 @@ require (
github.com/prometheus/client_golang v1.20.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
k8s.io/api v0.31.1
k8s.io/apimachinery v0.31.1
k8s.io/client-go v0.31.1
@@ -48,18 +47,17 @@ require (
github.com/moul/http2curl v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/sys v0.22.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect

go.sum
View File

@@ -178,6 +178,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -253,8 +255,6 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -286,14 +286,10 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -301,8 +297,6 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -2,11 +2,10 @@ package callbacks
import (
"context"
"fmt"
"time"
"fmt"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -92,26 +91,6 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list jobs %v", err)
}
items := make([]runtime.Object, len(jobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range jobs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
annotations := make(map[string]string)
jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
}
items[i] = &jobs.Items[i]
}
return items
}
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -198,11 +177,6 @@ func GetCronJobAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.CronJob).ObjectMeta.Annotations
}
// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.Job).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
@@ -233,11 +207,6 @@ func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}
// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
@@ -268,11 +237,6 @@ func GetCronJobContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers
}
// GetJobContainers returns the containers of given job
func GetJobContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.Job).Spec.Template.Spec.Containers
}
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
@@ -303,11 +267,6 @@ func GetCronJobInitContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers
}
// GetJobInitContainers returns the containers of given job
func GetJobInitContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.Job).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
@@ -347,38 +306,6 @@ func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runti
return err
}
// ReCreateJobFromjob performs rolling upgrade on job
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
oldJob := resource.(*batchv1.Job)
job := oldJob.DeepCopy()
// Delete the old job
policy := meta_v1.DeletePropagationBackground
err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy})
if err != nil {
return err
}
// Remove fields that should not be specified when creating a new Job
job.ObjectMeta.ResourceVersion = ""
job.ObjectMeta.UID = ""
job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
job.Status = batchv1.JobStatus{}
// Remove problematic labels
delete(job.Spec.Template.Labels, "controller-uid")
delete(job.Spec.Template.Labels, batchv1.ControllerUidLabel)
delete(job.Spec.Template.Labels, batchv1.JobNameLabel)
delete(job.Spec.Template.Labels, "job-name")
// Remove the selector to allow it to be auto-generated
job.Spec.Selector = nil
// Create the new job with same spec
_, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
daemonSet := resource.(*appsv1.DaemonSet)
@@ -402,15 +329,11 @@ func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource run
// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
var err error
rollout := resource.(*argorolloutv1alpha1.Rollout)
strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
switch options.ToArgoRolloutStrategy(strategy) {
case options.RestartStrategy:
_, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
case options.RolloutStrategy:
_, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
}
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
return err
}
@@ -424,11 +347,6 @@ func GetCronJobVolumes(item runtime.Object) []v1.Volume {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes
}
// GetJobVolumes returns the Volumes of given job
func GetJobVolumes(item runtime.Object) []v1.Volume {
return item.(*batchv1.Job).Spec.Template.Spec.Volumes
}
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes

View File

@@ -1,524 +0,0 @@
package callbacks_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/testutil"
"github.com/stakater/Reloader/pkg/kube"
)
var (
clients = setupTestClients()
)
type testFixtures struct {
defaultContainers []v1.Container
defaultInitContainers []v1.Container
defaultVolumes []v1.Volume
namespace string
}
func newTestFixtures() testFixtures {
return testFixtures{
defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}},
defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}},
namespace: "default",
}
}
func setupTestClients() kube.Clients {
return kube.Clients{
KubernetesClient: fake.NewSimpleClientset(),
ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
}
}
// TestUpdateRollout test update rollout strategy annotation
func TestUpdateRollout(t *testing.T) {
namespace := "test-ns"
cases := map[string]struct {
name string
strategy string
isRestart bool
}{
"test-without-strategy": {
name: "defaults to rollout strategy",
strategy: "",
isRestart: false,
},
"test-with-restart-strategy": {
name: "triggers a restart strategy",
strategy: "restart",
isRestart: true,
},
"test-with-rollout-strategy": {
name: "triggers a rollout strategy",
strategy: "rollout",
isRestart: false,
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
rollout, err := testutil.CreateRollout(
clients.ArgoRolloutClient, name, namespace,
map[string]string{options.RolloutStrategyAnnotation: tc.strategy},
)
if err != nil {
t.Errorf("creating rollout: %v", err)
}
modifiedChan := watchRollout(rollout.Name, namespace)
err = callbacks.UpdateRollout(clients, namespace, rollout)
if err != nil {
t.Errorf("updating rollout: %v", err)
}
rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
if err != nil {
t.Errorf("getting rollout: %v", err)
}
if isRestartStrategy(rollout) == tc.isRestart {
t.Errorf("Should not be a restart strategy")
}
select {
case <-modifiedChan:
// object has been modified
case <-time.After(1 * time.Second):
t.Errorf("Rollout has not been updated")
}
})
}
}
func TestResourceItems(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string) error
getItemsFunc func(kube.Clients, string) []runtime.Object
expectedCount int
}{
{
name: "Deployments",
createFunc: createTestDeployments,
getItemsFunc: callbacks.GetDeploymentItems,
expectedCount: 2,
},
{
name: "CronJobs",
createFunc: createTestCronJobs,
getItemsFunc: callbacks.GetCronJobItems,
expectedCount: 2,
},
{
name: "Jobs",
createFunc: createTestJobs,
getItemsFunc: callbacks.GetJobItems,
expectedCount: 2,
},
{
name: "DaemonSets",
createFunc: createTestDaemonSets,
getItemsFunc: callbacks.GetDaemonSetItems,
expectedCount: 2,
},
{
name: "StatefulSets",
createFunc: createTestStatefulSets,
getItemsFunc: callbacks.GetStatefulSetItems,
expectedCount: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.createFunc(clients, fixtures.namespace)
assert.NoError(t, err)
items := tt.getItemsFunc(clients, fixtures.namespace)
assert.Equal(t, tt.expectedCount, len(items))
})
}
}
func TestGetAnnotations(t *testing.T) {
testAnnotations := map[string]string{"version": "1"}
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) map[string]string
}{
{"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations},
{"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations},
{"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations},
{"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations},
{"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations},
{"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetRolloutAnnotations},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
})
}
}
func TestGetPodAnnotations(t *testing.T) {
testAnnotations := map[string]string{"version": "1"}
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) map[string]string
}{
{"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations},
{"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations},
{"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations},
{"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations},
{"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations},
{"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
})
}
}
func TestGetContainers(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Container
}{
{"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers},
{"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers},
{"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers},
{"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), callbacks.GetCronJobContainers},
{"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers},
{"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource))
})
}
}
func TestGetInitContainers(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Container
}{
{"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers},
{"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers},
{"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers},
{"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers},
{"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers},
{"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource))
})
}
}
func TestUpdateResources(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
updateFunc func(kube.Clients, string, runtime.Object) error
}{
{"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resource, err := tt.createFunc(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = tt.updateFunc(clients, fixtures.namespace, resource)
assert.NoError(t, err)
})
}
}
func TestCreateJobFromCronjob(t *testing.T) {
fixtures := newTestFixtures()
cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
assert.NoError(t, err)
}
func TestReCreateJobFromJob(t *testing.T) {
fixtures := newTestFixtures()
job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
assert.NoError(t, err)
}
func TestGetVolumes(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Volume
}{
{"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes},
{"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes},
{"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes},
{"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes},
{"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource))
})
}
}
// Helper functions
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
return rollout.Spec.RestartAt == nil
}
func watchRollout(name, namespace string) chan interface{} {
timeOut := int64(1)
modifiedChan := make(chan interface{})
watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
go watchModified(watcher, name, modifiedChan)
return modifiedChan
}
func watchModified(watcher watch.Interface, name string, modifiedChan chan interface{}) {
for event := range watcher.ResultChan() {
item := event.Object.(*argorolloutv1alpha1.Rollout)
if item.Name == name {
switch event.Type {
case watch.Modified:
modifiedChan <- nil
}
return
}
}
}
func createTestDeployments(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestCronJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestDaemonSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestStatefulSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *appsv1.DaemonSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *appsv1.StatefulSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
case *batchv1.Job:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.ObjectMeta.Annotations = annotations
}
return obj
}
func createResourceWithContainers(obj runtime.Object, containers []v1.Container) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.Containers = containers
case *appsv1.DaemonSet:
v.Spec.Template.Spec.Containers = containers
case *appsv1.StatefulSet:
v.Spec.Template.Spec.Containers = containers
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers
case *batchv1.Job:
v.Spec.Template.Spec.Containers = containers
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.Spec.Containers = containers
}
return obj
}
func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.InitContainers = initContainers
case *appsv1.DaemonSet:
v.Spec.Template.Spec.InitContainers = initContainers
case *appsv1.StatefulSet:
v.Spec.Template.Spec.InitContainers = initContainers
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers
case *batchv1.Job:
v.Spec.Template.Spec.InitContainers = initContainers
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.Spec.InitContainers = initContainers
}
return obj
}
func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.Volumes = volumes
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes
case *batchv1.Job:
v.Spec.Template.Spec.Volumes = volumes
case *appsv1.DaemonSet:
v.Spec.Template.Spec.Volumes = volumes
case *appsv1.StatefulSet:
v.Spec.Template.Spec.Volumes = volumes
}
return obj
}
func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}
func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-daemonset",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}
func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-statefulset",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}
func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
cronJob := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cronjob",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}
func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}

View File

@@ -55,20 +55,6 @@ func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
}
}
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a cronjob
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetJobItems,
AnnotationsFunc: callbacks.GetJobAnnotations,
PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
ContainersFunc: callbacks.GetJobContainers,
InitContainersFunc: callbacks.GetJobInitContainers,
UpdateFunc: callbacks.ReCreateJobFromjob,
VolumesFunc: callbacks.GetJobVolumes,
ResourceType: "Job",
}
}
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
@@ -167,10 +153,6 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err

View File

@@ -2,15 +2,6 @@ package options
import "github.com/stakater/Reloader/internal/pkg/constants"
type ArgoRolloutStrategy int
const (
// RestartStrategy is the annotation value for restart strategy for rollouts
RestartStrategy ArgoRolloutStrategy = iota
// RolloutStrategy is the annotation value for rollout strategy for rollouts
RolloutStrategy
)
var (
// Auto reload all resources when their corresponding configmaps/secrets are updated
AutoReloadAll = false
@@ -36,8 +27,6 @@ var (
// SearchMatchAnnotation is an annotation to tag secrets to be found with
// AutoSearchAnnotation
SearchMatchAnnotation = "reloader.stakater.com/match"
// RolloutStrategyAnnotation is an annotation to define rollout update strategy
RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
// LogFormat is the log format to use (json, or empty string for default)
LogFormat = ""
// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -56,14 +45,3 @@ var (
// Url to send a request to instead of triggering a reload
WebhookUrl = ""
)
func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {
switch s {
case "restart":
return RestartStrategy
case "rollout":
fallthrough
default:
return RolloutStrategy
}
}

View File

@@ -10,8 +10,6 @@ import (
"strings"
"time"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
openshiftv1 "github.com/openshift/api/apps/v1"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
@@ -23,7 +21,6 @@ import (
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -72,16 +69,16 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) {
}
}
func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta {
func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{"firstLabel": "temp"},
Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations),
Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload),
}
}
func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string {
func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) map[string]string {
annotations := make(map[string]string)
if autoReload {
annotations[options.ReloaderAutoAnnotation] = "true"
@@ -93,15 +90,13 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
}
if !(len(annotations) > 0) {
annotations = map[string]string{
if len(annotations) > 0 {
return annotations
} else {
return map[string]string{
options.ConfigmapUpdateOnChangeAnnotation: name,
options.SecretUpdateOnChangeAnnotation: name}
}
for k, v := range extraAnnotations {
annotations[k] = v
}
return annotations
}
func getEnvVarSources(name string) []v1.EnvFromSource {
@@ -178,7 +173,7 @@ func getVolumes(name string) []v1.Volume {
}
}
func getVolumeMounts() []v1.VolumeMount {
func getVolumeMounts(name string) []v1.VolumeMount {
return []v1.VolumeMount{
{
MountPath: "etc/config",
@@ -276,7 +271,7 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
Value: "test",
},
},
VolumeMounts: getVolumeMounts(),
VolumeMounts: getVolumeMounts(name),
},
},
Volumes: getVolumes(name),
@@ -294,7 +289,7 @@ func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
{
Image: "busybox",
Name: "busyBox",
VolumeMounts: getVolumeMounts(),
VolumeMounts: getVolumeMounts(name),
},
},
Containers: []v1.Container{
@@ -347,7 +342,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -366,7 +361,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
replicaset := int32(1)
podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
@@ -381,7 +376,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -399,7 +394,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *ap
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -416,7 +411,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName strin
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -434,7 +429,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
replicaset := int32(1)
podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
@@ -448,7 +443,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -465,7 +460,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
replicaset := int32(1)
deployment := &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -480,7 +475,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
if !both {
deployment.ObjectMeta.Annotations = nil
}
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false)
return deployment
}
@@ -488,9 +483,9 @@ func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName strin
replicaset := int32(1)
var objectMeta metav1.ObjectMeta
if resourceType == SecretResourceType {
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false)
} else if resourceType == ConfigmapResourceType {
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true)
}
return &appsv1.Deployment{
@@ -542,7 +537,7 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string,
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false),
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -557,7 +552,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false),
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -573,7 +568,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.Dae
// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false),
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -589,7 +584,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSe
// GetStatefulSetWithEnvVar provides statefulset for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}),
ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false),
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -638,64 +633,6 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret {
}
}
func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
return &batchv1.CronJob{
ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
Spec: batchv1.CronJobSpec{
Schedule: "*/5 * * * *", // Run every 5 minutes
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithVolumes(cronJobName),
},
},
},
}
}
func GetJob(namespace string, jobName string) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}),
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithVolumes(jobName),
},
}
}
func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {
return &batchv1.CronJob{
ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}),
Spec: batchv1.CronJobSpec{
Schedule: "*/5 * * * *", // Run every 5 minutes
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithEnvVars(cronJobName),
},
},
},
}
}
func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}),
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithEnvVars(jobName),
},
}
}
// GetSecretWithUpdatedLabel provides secret for testing
func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
return &v1.Secret{
@@ -906,36 +843,6 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
return statefulset, err
}
// CreateCronJob creates a cronjob in the given namespace and returns the CronJob
func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) {
logrus.Infof("Creating CronJob")
cronJobClient := client.BatchV1().CronJobs(namespace)
var cronJobObj *batchv1.CronJob
if volumeMount {
cronJobObj = GetCronJob(namespace, cronJobName)
} else {
cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName)
}
cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return cronJob, err
}
// CreateJob creates a job in the given namespace and returns the Job
func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) {
logrus.Infof("Creating Job")
jobClient := client.BatchV1().Jobs(namespace)
var jobObj *batchv1.Job
if volumeMount {
jobObj = GetJob(namespace, jobName)
} else {
jobObj = GetJobWithEnvVar(namespace, jobName)
}
job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return job, err
}
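The removed CreateCronJob and CreateJob helpers only wrap the standard typed clientset (client.BatchV1()), so the same flow can be exercised against a fake clientset without a cluster. A hedged sketch, assuming k8s.io/client-go's fake package; the object name and namespace are illustrative only.

package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset satisfies kubernetes.Interface, so helpers like
	// CreateJob above can run against it in tests.
	client := fake.NewSimpleClientset()

	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-job", Namespace: "test-ns"},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers:    []v1.Container{{Name: "busybox", Image: "busybox"}},
				},
			},
		},
	}

	created, err := client.BatchV1().Jobs("test-ns").Create(context.TODO(), job, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created job:", created.Name)
}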
// DeleteDeployment deletes a deployment in the given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
@@ -1164,28 +1071,3 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
func GetSHAfromEmptyData() string {
return crypto.GenerateSHA("")
}
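GetSHAfromEmptyData simply hashes an empty string with the project's crypto.GenerateSHA; tests compare digests before and after an update and treat a changed digest as a reload trigger. A stand-in sketch of that pattern using a stdlib SHA-256 digest, which is only an illustration and not necessarily the algorithm or encoding crypto.GenerateSHA uses.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// generateSHA is a stand-in for the project's crypto.GenerateSHA; the real
// helper may use a different algorithm or encoding.
func generateSHA(data string) string {
	sum := sha256.Sum256([]byte(data))
	return hex.EncodeToString(sum[:])
}

func main() {
	before := generateSHA("old-config-value")
	after := generateSHA("new-config-value")
	// A changed digest is what signals that dependent workloads should reload.
	fmt.Println("reload needed:", before != after)
	// The empty-data digest mirrors what GetSHAfromEmptyData returns in this stand-in.
	fmt.Println("empty-data SHA:", generateSHA(""))
}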
// GetRollout provides rollout for testing
func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout {
replicaset := int32(1)
return &argorolloutv1alpha1.Rollout{
ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations),
Spec: argorolloutv1alpha1.RolloutSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Template: getPodTemplateSpecWithVolumes(rolloutName),
},
}
}
// CreateRollout creates a rollout in the given namespace and returns the Rollout
func CreateRollout(client argorollout.Interface, rolloutName string, namespace string, annotations map[string]string) (*argorolloutv1alpha1.Rollout, error) {
logrus.Infof("Creating Rollout")
rolloutClient := client.ArgoprojV1alpha1().Rollouts(namespace)
rolloutObj := GetRollout(namespace, rolloutName, annotations)
rollout, err := rolloutClient.Create(context.TODO(), rolloutObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return rollout, err
}
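CreateRollout follows the same pattern as the other Create* helpers, but against the Argo Rollouts typed clientset (client.ArgoprojV1alpha1().Rollouts(namespace)). A hedged sketch of creating a Rollout with the strategy annotation set; the fake clientset import path is an assumption based on how generated clientsets are typically laid out.

package main

import (
	"context"
	"fmt"

	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	// Assumed path: the generated fake clientset that ships with argo-rollouts.
	argofake "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	client := argofake.NewSimpleClientset()

	rollout := &argorolloutv1alpha1.Rollout{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-rollout",
			Namespace: "test-ns",
			// The strategy annotation shown earlier in this diff; "restart" is the
			// non-default value handled by ToArgoRolloutStrategy.
			Annotations: map[string]string{"reloader.stakater.com/rollout-strategy": "restart"},
		},
	}

	created, err := client.ArgoprojV1alpha1().Rollouts("test-ns").Create(context.TODO(), rollout, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created rollout:", created.Name)
}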

View File

@@ -1,5 +1,4 @@
etc/pki
root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo

View File

@@ -1,5 +1,4 @@
etc/pki
root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo