Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits: 37 commits
| SHA1 |
|---|
| `e79e1ef37e` |
| `561f21a81d` |
| `0aa974f7e6` |
| `5185ff2c91` |
| `d6a95a923a` |
| `3dc3b4726c` |
| `1b2780f712` |
| `574129d637` |
| `919d0cc3ca` |
| `ca3c95404e` |
| `9ded29a9aa` |
| `37e6c6b7c8` |
| `1d923bce36` |
| `636ab7a3f5` |
| `41802adc52` |
| `e3d352cf56` |
| `dd7d35e268` |
| `cb769d0f64` |
| `234084a042` |
| `c341a8bb97` |
| `6aff0b9c79` |
| `9fc4f5bcf7` |
| `d45054ba5a` |
| `5246ec70e7` |
| `33f28ec1e3` |
| `ee8ff2d413` |
| `4de1659965` |
| `277dde8525` |
| `0e4db821d9` |
| `565a3d6916` |
| `04d72d0b70` |
| `0a2aa15d1f` |
| `035fa66692` |
| `d99a510628` |
| `266bad4c7a` |
| `bd54311ac2` |
| `7ddb1ae434` |
**.github/workflows/init-branch-release.yaml** (2 changes)

```diff
@@ -57,7 +57,7 @@ jobs:
           git diff

       - name: Create pull request
-        uses: peter-evans/create-pull-request@v7.0.5
+        uses: peter-evans/create-pull-request@v7.0.6
         with:
           commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
          title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
```
**.github/workflows/pull_request-helm.yaml** (20 changes)

```diff
@@ -16,7 +16,7 @@ env:

 jobs:

-  helm-validation:
+  helm-chart-validation:
     permissions:
       contents: read

@@ -42,6 +42,24 @@ jobs:
     with:
       charts: deployments/kubernetes/chart/reloader

+  helm-version-validation:
+    needs: helm-chart-validation
+
+    permissions:
+      contents: read
+
+    runs-on: ubuntu-latest
+    name: Helm Version Validation
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'release/helm-chart') }}
+
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{github.event.pull_request.head.sha}}
+          fetch-depth: 0
+
+      - name: Add Stakater Helm Repo
+        run: |
+          helm repo add stakater https://stakater.github.io/stakater-charts
```
**.github/workflows/pull_request.yaml** (2 changes)

```diff
@@ -25,7 +25,7 @@ env:

 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.106
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.128
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: README.md
```
**.github/workflows/pull_request_docs.yaml** (11 changes)

```diff
@@ -15,8 +15,17 @@ on:

 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.106
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.128
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: docs
       MD_LINT_CONFIG: .markdownlint.yaml
+  build:
+    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.122
+    with:
+      DOCKER_FILE_PATH: Dockerfile-docs
+      CONTAINER_REGISTRY_URL: ghcr.io/stakater
+    secrets:
+      CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
+      CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
+      SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
**.github/workflows/push-helm-chart.yaml** (6 changes)

```diff
@@ -15,14 +15,14 @@ env:
   REGISTRY: ghcr.io

 jobs:
-  build:
+  verify-and-push-helm-chart:

     permissions:
       contents: read
       packages: write # to push artifacts to `ghcr.io`

-    name: Build
-    if: github.event.pull_request.merged == true
+    name: Verify and Push Helm Chart
+    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'release/helm-chart')) }}
     runs-on: ubuntu-latest

     steps:
```
**.github/workflows/push-pr-image.yaml** (9 changes)

```diff
@@ -5,6 +5,15 @@ on:
     branches:
       - master
     types: [ labeled ]
+    paths:
+      - '!.markdownlint.yaml'
+      - '!.vale.ini'
+      - '!Dockerfile-docs'
+      - '!docs-nginx.conf'
+      - '!docs/**'
+      - '!theme_common'
+      - '!theme_override'
+      - '!deployments/kubernetes/chart/reloader/**'

 env:
   DOCKER_FILE_PATH: Dockerfile
```
**.github/workflows/push.yaml** (1 change)

```diff
@@ -200,7 +200,6 @@ jobs:
       push: true
       build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
       cache-to: type=inline
       platforms: linux/amd64,linux/arm,linux/arm64
       tags: |
         ${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
       labels: |
```
**.vale.ini**

```diff
@@ -1,7 +1,7 @@
 StylesPath = styles
 MinAlertLevel = warning

-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.43/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.53/Stakater.zip
 Vocab = Stakater

 # Only check MarkDown files
```
**Dockerfile-docs**

```diff
@@ -1,4 +1,4 @@
-FROM python:3.12 as builder
+FROM python:3.13-alpine as builder

 # set workdir
 RUN mkdir -p $HOME/application
@@ -10,7 +10,7 @@ COPY --chown=1001:root . .
 RUN pip3 install -r theme_common/requirements.txt

 # Combine Theme Resources
-RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
+RUN python theme_common/scripts/combine_theme_resources.py -s theme_common/resources -ov theme_override/resources -o dist/_theme
 # Produce mkdocs file
 RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml
```
**README.md** (89 changes)

````diff
@@ -50,13 +50,13 @@ spec:
     metadata:
 ```

-This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
+This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.

 You can filter it by the type of monitored resource and use typed versions of `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use `configmap.reloader.stakater.com/auto` annotation to look for changes in mounted `ConfigMap`, changes in any of mounted `Secret`s will not trigger a rolling upgrade on related pods.

 You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
 are tagged with a special annotation. To take advantage of that, annotate
-your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:
+your deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs like this:

 ```yaml
 kind: Deployment
````
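For illustration, a minimal sketch of the typed `auto` annotation discussed in this hunk (the workload and image names are hypothetical; `foo-secret` follows the README's running example). Only changes to the mounted `Secret` would trigger a rolling upgrade here:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app            # hypothetical name
  annotations:
    # typed variant: only Secret changes trigger a rolling upgrade
    secret.reloader.stakater.com/auto: "true"
spec:
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: app
          image: nginx:1.27    # hypothetical image
          envFrom:
            - secretRef:
                name: foo-secret
```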
```diff
@@ -91,11 +91,11 @@ not.

 Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"` or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` will restart upon a change in a secret it uses.

-We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset` or `rollout`.
+We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset`, `rollout`, `cronJob` or `job`.
 To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](.#Configmap) or [Secret](.#Secret).

 It's also possible to enable auto reloading for all resources, by setting the `--auto-reload-all` flag.
-In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their ConfigMaps or Secrets are updated.
+In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their Configmaps or Secrets are updated.
 Notice that setting the auto annotation to an undefined value counts as false as well.

 ### Configmap
```
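As a sketch of the named variant described above, a reload can be pinned to specific objects via annotations (a fragment of a workload's `metadata`; the names `foo-configmap`/`foo-secret` follow the README's running example):

```yaml
metadata:
  annotations:
    # rolling upgrade only when these named objects change,
    # regardless of whether the workload mounts them
    configmap.reloader.stakater.com/reload: "foo-configmap"
    secret.reloader.stakater.com/reload: "foo-secret"
```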
```diff
@@ -158,7 +158,7 @@ spec:

 - Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
 - For [`rollouts`](https://github.com/argoproj/argo-rollouts/) Reloader simply triggers a change; it is up to you how you configure the `rollout` strategy.
-- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
+- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets/CronJobs/Jobs`
 - `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
 - you may override the auto annotation with the `--auto-annotation` flag
 - you may override the secret typed auto annotation with the `--secret-auto-annotation` flag

@@ -173,6 +173,7 @@ spec:
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
 - you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
+- you can configure rollout reload strategy with `reloader.stakater.com/rollout-strategy` annotation, `restart` or `rollout` values are available (defaults to `rollout`)

 ## Reload Strategies
```
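A minimal sketch of the new `rollout-strategy` annotation added in the hunk above, on an Argo Rollout (resource name is hypothetical; the annotation values come from the README text):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: example-rollout        # hypothetical name
  annotations:
    reloader.stakater.com/auto: "true"
    # "restart" patches spec.restartAt; "rollout" (the default)
    # lets the Rollout's own strategy handle the change
    reloader.stakater.com/rollout-strategy: "restart"
```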
```diff
@@ -209,8 +210,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by

 | Argument | Description |
 |----------------------------------|----------------------|
-| --resources-to-ignore=configMaps | To ignore configMaps |
-| --resources-to-ignore=secrets | To ignore secrets |
+| `--resources-to-ignore=configMaps` | To ignore configmaps |
+| `--resources-to-ignore=secrets` | To ignore secrets |

 **Note:** At one time only one of these resources can be ignored; trying to ignore both will cause an error in Reloader. A workaround for ignoring both resources is to scale the Reloader pods down to `0`.
```
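A hedged sketch of how one of these arguments might be wired into the Reloader container spec (field placement is an assumption; only one of the two values may be used at a time):

```yaml
# fragment of the Reloader Deployment's container spec
containers:
  - name: reloader
    image: ghcr.io/stakater/reloader:v1.2.1
    args:
      - "--resources-to-ignore=configMaps"   # or secrets, never both
```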
```diff
@@ -328,7 +329,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.isArgoRollouts` | Enable Argo `Rollouts`. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.ignoreSecrets` | To ignore secrets. Valid values are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
-| `reloader.ignoreConfigMaps` | To ignore configMaps. Valid values are either `true` or `false` | boolean | `false` |
+| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.reloadOnCreate` | Enable reload on create events. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid values are either `true` or `false` | boolean | `false` |
```
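An illustrative `values.yaml` fragment built from the parameters in the table above (the chosen values are for illustration only):

```yaml
reloader:
  isArgoRollouts: false
  isOpenshift: false
  ignoreSecrets: true      # ignoreConfigMaps must then stay false
  reloadOnCreate: true
```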
```diff
@@ -346,25 +347,25 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}

 #### Deployment Reloader Parameters

 | Parameter | Description | Type | Default |
 |---|---|---|---|
-| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true` | int | 1 |
+| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true`. The replicas will be limited to 1 when `reloader.enableHA = false` | int | 1 |
 | `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
 | `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
 | `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
 | `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
 | `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
 | `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
 | `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
 | `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
 | `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
 | `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
 | `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
 | `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
 | `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
 | `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
 | `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
 | `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |

 #### Other Reloader Parameters
```
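A minimal sketch of the HA-related values described by the `replicas` row above:

```yaml
reloader:
  enableHA: true           # without this, replicas is capped at 1
  deployment:
    replicas: 2
```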
```diff
@@ -384,7 +385,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}

 #### Additional Remarks

-- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configMap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
+- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configmap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
 - At one time only one of the resources `ignoreConfigMaps` or `ignoreSecrets` can be ignored; trying to do both will cause an error in helm template compilation
 - Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions
 - `isOpenShift` Recent versions of OpenShift (tested on 4.13.3) require the specified user to be in a `uid` range which is dynamically assigned by the namespace. The solution is to unset the runAsUser variable via `deployment.securityContext.runAsUser=null` and let OpenShift assign it at install
```
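For the OpenShift remark above, a hedged values fragment that unsets `runAsUser` so OpenShift can assign a namespace-scoped uid:

```yaml
reloader:
  isOpenshift: true
  deployment:
    securityContext:
      runAsUser: null      # let OpenShift pick a uid from the namespace range
```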
```diff
@@ -406,7 +407,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}

 ### Documentation

-You can find more documentation [here](docs)
+The Reloader documentation can be viewed from [the doc site](https://docs.stakater.com/reloader/). The doc source is in the [docs](./docs/) folder.

 ### Have a question?

@@ -414,7 +415,7 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues).

 ### Talk to us on Slack

-Join and talk to us on Slack for discussing Reloader
+Join and talk to us on Slack for discussing Reloader:

-[](https://slack.stakater.com/)
+[](https://stakater-community.slack.com/messages/CC5S05S12)

@@ -427,12 +428,12 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r

 ### Developing

-1. Deploy Reloader.
-1. Run `okteto up` to activate your development container.
+1. Deploy Reloader
+1. Run `okteto up` to activate your development container
 1. `make build`
 1. `./Reloader`

-PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
+PRs are welcome. In general, we follow the "fork-and-pull" Git workflow:

 1. **Fork** the repo on GitHub
 1. **Clone** the project to your own machine
```
```diff
@@ -444,15 +445,29 @@ PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.

 ## Release Processes

-_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request. When a GitHub release is made, the corresponding image is built and pushed to the registry.
-
-_Repository git tagging_: The Reloader repository is tagged on every push to main. The creation of a tag does not trigger anything else, it just acts as a pointer to a commit on main.
-
-_Helm chart versioning_: The Reloader Helm chart release process is still [work in progress](https://github.com/stakater/Reloader/issues/684). This page will be updated when the process is settled. As a heads-up, to address the issues that are inherent in the current process the chart will most probably be relocated to the [Stakater charts repository](https://github.com/stakater/charts/). This setup is common in open-source repositories. When a GitHub release has been manually created in this repository, an image will be built, and Renovate in the charts repository will update the Helm chart to use it.
+_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request.
+
+To make a GitHub release:
+
+1. Code owners create a release branch `release-vX.Y.Z`
+1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
+1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
+1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
+
+_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
+
+_Helm chart versioning_: The Reloader Helm chart is maintained in [this repository](./deployments/kubernetes/chart/reloader). The Helm chart has its own semantic versioning. Helm charts and code releases are separate artifacts and separately versioned. The manifest making strategy relies on Kustomize. The Reloader Helm chart manages the two artifacts with these two fields:
+
+- [`appVersion`](./deployments/kubernetes/chart/reloader/Chart.yaml) points to the released Reloader application image version listed on the [releases page](https://github.com/stakater/Reloader/releases)
+- [`version`](./deployments/kubernetes/chart/reloader/Chart.yaml) sets the Reloader Helm chart version

-Helm chart will be released to the chart registry whenever files in `deployments/kubernetes/chart/reloader/**` change on the main branch.
+The Helm chart will be released by the maintainers, on labelling a PR with `release/helm-chart` and prematurely updating the `version` field in the `Chart.yaml` file.

 ## Changelog

-View our closed [Pull Requests](https://github.com/stakater/Reloader/pulls?q=is%3Apr+is%3Aclosed).
+View the [releases page](https://github.com/stakater/Reloader/releases) to see what has changed in each release.

 ## License
```
**deployments/kubernetes/chart/reloader/Chart.yaml**

```diff
@@ -3,8 +3,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 1.2.1
-appVersion: v1.2.0
+version: 1.2.2
+appVersion: v1.2.1
 keywords:
   - Reloader
   - kubernetes
```
```diff
@@ -89,6 +89,9 @@ rules:
+      - jobs
     verbs:
+      - create
+      - delete
       - list
       - get
 {{- if .Values.reloader.enableHA }}
   - apiGroups:
       - "coordination.k8s.io"
```
```diff
@@ -18,7 +18,7 @@ metadata:
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
 {{- if not (.Values.reloader.enableHA) }}
-  replicas: 1
+  replicas: {{ min .Values.reloader.deployment.replicas 1 }}
 {{- else }}
   replicas: {{ .Values.reloader.deployment.replicas }}
 {{- end}}
```
```diff
@@ -75,9 +75,13 @@ spec:
       containers:
 {{- if .Values.global.imageRegistry }}
       - image: "{{ .Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
 {{- else }}
+{{- if .Values.reloader.deployment.image.digest }}
+      - image: "{{ .Values.reloader.deployment.image.name }}@{{ .Values.reloader.deployment.image.digest }}"
+{{- else }}
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
 {{- end }}
+{{- end }}
         imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
         name: {{ template "reloader-fullname" . }}
         env:
```
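An illustrative values fragment exercising the template logic above; the digest is a placeholder. When `digest` is set (and no `global.imageRegistry` is configured) the image is pinned as `name@digest` instead of `name:tag`:

```yaml
reloader:
  deployment:
    image:
      name: ghcr.io/stakater/reloader
      tag: v1.2.1
      digest: sha256:1234567    # placeholder digest; overrides the tag
```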
```diff
@@ -5,7 +5,12 @@ metadata:
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
+  {{- if .Values.reloader.podDisruptionBudget.maxUnavailable }}
+  maxUnavailable: {{ .Values.reloader.podDisruptionBudget.maxUnavailable }}
+  {{- end }}
+  {{- if and .Values.reloader.podDisruptionBudget.minAvailable (not .Values.reloader.podDisruptionBudget.maxUnavailable)}}
   minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
+  {{- end }}
   selector:
     matchLabels:
       app: {{ template "reloader-fullname" . }}
```
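A sketch of the corresponding values: per the template above, `minAvailable` is only rendered when `maxUnavailable` is unset, so `maxUnavailable` wins when both are given:

```yaml
reloader:
  podDisruptionBudget:
    enabled: true
    maxUnavailable: 1
    # minAvailable: 1    # ignored while maxUnavailable is set
```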
```diff
@@ -67,16 +67,6 @@ rules:
       - get
       - update
       - patch
-  - apiGroups:
-      - "extensions"
-    resources:
-      - deployments
-      - daemonsets
-    verbs:
-      - list
-      - get
-      - update
-      - patch
   - apiGroups:
       - "batch"
     resources:
@@ -90,6 +80,9 @@ rules:
+      - jobs
     verbs:
+      - create
+      - delete
       - list
       - get
 {{- if .Values.reloader.enableHA }}
   - apiGroups:
       - "coordination.k8s.io"
```
**deployments/kubernetes/chart/reloader/values.yaml**

```diff
@@ -96,11 +96,12 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v1.2.0
+      version: v1.2.1
     image:
       name: ghcr.io/stakater/reloader
       base: stakater/reloader
-      tag: v1.2.0
+      tag: v1.2.1
+      # digest: sha256:1234567
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -288,6 +289,9 @@ reloader:
     enabled: false
     # Set the minimum available replicas
     # minAvailable: 1
+    # OR Set the maximum unavailable replicas
+    # maxUnavailable: 1
+    # If both defined only maxUnavailable will be used

   netpol:
     enabled: false
```
```diff
@@ -48,6 +48,9 @@ rules:
+      - jobs
     verbs:
+      - create
+      - delete
       - list
       - get
   - apiGroups:
       - ""
     resources:
```
```diff
@@ -17,7 +17,7 @@ spec:
         app: reloader-reloader
     spec:
       containers:
-        - image: "ghcr.io/stakater/reloader:v1.1.0"
+        - image: "ghcr.io/stakater/reloader:v1.3.0"
          imagePullPolicy: IfNotPresent
          name: reloader-reloader
          env:
```
```diff
@@ -52,6 +52,9 @@ rules:
+      - jobs
     verbs:
+      - create
+      - delete
       - list
       - get
   - apiGroups:
       - ""
     resources:
@@ -101,7 +104,7 @@ spec:
             resourceFieldRef:
               divisor: "1"
               resource: limits.memory
-        image: "ghcr.io/stakater/reloader:latest"
+        image: ghcr.io/stakater/reloader:v1.3.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 5
```
````diff
@@ -1,6 +1,17 @@
 # How Does Reloader Work?

-Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection Reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Daemonset` and `Statefulset`.
+Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection Reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Daemonset` and `Statefulset`:
+
+```mermaid
+flowchart LR
+    subgraph Reloader
+        controller("Controller watches in a loop") -- "Detects a change" --> upgrade_handler("Upgrade handler checks if the change is a valid data change by comparing the change hash")
+        upgrade_handler -- "Update resource" --> update_resource("Updates the resource with computed hash of change")
+    end
+    Reloader -- "Watches" --> secret_configmaps("Secrets/ConfigMaps")
+    Reloader -- "Updates resources with Reloader environment variable" --> resources("Deployments/DaemonSets/StatefulSets resources with Reloader annotation")
+    resources -- "Restart pods based on StrategyType" --> Pods
+```

 ## How Does Change Detection Work?

@@ -43,7 +54,7 @@ When Reloader detects changes in configmap. It gets two objects of configmap. Firs

 After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulset` and looks for above mentioned annotation for configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.

-### Environment Variable for ConfigMap
+### Environment Variable for Configmap

 If configmap name is foo then
````
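For illustration, a sketch of the hash-carrying environment variable described above, for a configmap named `foo` (the variable name follows Reloader's `STAKATER_<NAME>_CONFIGMAP` convention; the hash value is a placeholder):

```yaml
# fragment of an upgraded pod template
containers:
  - name: app
    env:
      - name: STAKATER_FOO_CONFIGMAP
        value: 0f6a75b9a8c4...    # placeholder hash of the configmap data
```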
```diff
@@ -1,7 +1,6 @@

 # Reloader vs ConfigmapController

-Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from configmapController. Below is a small comparison between these two controllers.
+Reloader is inspired from [`Configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapController`. Below is a small comparison between these two controllers.

 | Reloader | Configmap |
 |---|---|
```
```diff
@@ -3,12 +3,12 @@
 Below are the steps to use Reloader with Sealed Secrets:

 1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets)
-1. Install the controller for sealed secrets
+1. Install the controller for Sealed Secrets
 1. Fetch the encryption certificate
 1. Encrypt the secret
 1. Apply the secret
-1. Install the tool which uses that sealed secret
+1. Install the tool which uses that Sealed Secret
 1. Install Reloader
 1. Once everything is set up, update the original secret at client and encrypt it with kubeseal to see Reloader working
-1. Apply the updated sealed secret
+1. Apply the updated Sealed Secret
 1. Reloader will restart the pod to use that updated secret
```
**go.mod** (10 changes)

```diff
@@ -10,6 +10,7 @@ require (
 	github.com/prometheus/client_golang v1.20.5
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.1
+	github.com/stretchr/testify v1.9.0
 	k8s.io/api v0.31.1
 	k8s.io/apimachinery v0.31.1
 	k8s.io/client-go v0.31.1
@@ -47,17 +48,18 @@ require (
 	github.com/moul/http2curl v1.0.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/smartystreets/goconvey v1.7.2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/oauth2 v0.21.0 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
```
**go.sum** (10 changes)

```diff
@@ -178,8 +178,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
 github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -255,6 +253,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
 golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
 golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -286,10 +286,14 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
 golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
 golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -297,6 +301,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
 golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
```
**internal/pkg/callbacks/rolling_upgrade.go**

```diff
@@ -2,10 +2,11 @@ package callbacks

 import (
     "context"
-    "time"
+    "fmt"
+    "time"

     "github.com/sirupsen/logrus"
     "github.com/stakater/Reloader/internal/pkg/options"
     "github.com/stakater/Reloader/pkg/kube"
     appsv1 "k8s.io/api/apps/v1"
     batchv1 "k8s.io/api/batch/v1"
@@ -91,6 +92,26 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
     return items
 }

+// GetJobItems returns the jobs in given namespace
+func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
+    jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
+    if err != nil {
+        logrus.Errorf("Failed to list jobs %v", err)
+    }
+
+    items := make([]runtime.Object, len(jobs.Items))
+    // Ensure we always have pod annotations to add to
+    for i, v := range jobs.Items {
+        if v.Spec.Template.ObjectMeta.Annotations == nil {
+            annotations := make(map[string]string)
+            jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+        }
+        items[i] = &jobs.Items[i]
+    }
+
+    return items
+}
+
 // GetDaemonSetItems returns the daemonSets in given namespace
 func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
     daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -177,6 +198,11 @@ func GetCronJobAnnotations(item runtime.Object) map[string]string {
     return item.(*batchv1.CronJob).ObjectMeta.Annotations
 }

+// GetJobAnnotations returns the annotations of given job
+func GetJobAnnotations(item runtime.Object) map[string]string {
+    return item.(*batchv1.Job).ObjectMeta.Annotations
+}
+
 // GetDaemonSetAnnotations returns the annotations of given daemonSet
 func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
     return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
@@ -207,6 +233,11 @@ func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
     return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
 }

+// GetJobPodAnnotations returns the pod's annotations of given job
+func GetJobPodAnnotations(item runtime.Object) map[string]string {
+    return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
+}
+
 // GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
 func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
     return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
@@ -237,6 +268,11 @@ func GetCronJobContainers(item runtime.Object) []v1.Container {
     return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers
 }

+// GetJobContainers returns the containers of given job
+func GetJobContainers(item runtime.Object) []v1.Container {
+    return item.(*batchv1.Job).Spec.Template.Spec.Containers
+}
+
 // GetDaemonSetContainers returns the containers of given daemonSet
 func GetDaemonSetContainers(item runtime.Object) []v1.Container {
     return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
@@ -267,6 +303,11 @@ func GetCronJobInitContainers(item runtime.Object) []v1.Container {
     return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers
 }

+// GetJobInitContainers returns the containers of given job
+func GetJobInitContainers(item runtime.Object) []v1.Container {
+    return item.(*batchv1.Job).Spec.Template.Spec.InitContainers
+}
+
 // GetDaemonSetInitContainers returns the containers of given daemonSet
 func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
     return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
@@ -306,6 +347,38 @@ func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runti
     return err
 }

+// ReCreateJobFromjob performs rolling upgrade on job
+func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
+    oldJob := resource.(*batchv1.Job)
+    job := oldJob.DeepCopy()
+
+    // Delete the old job
+    policy := meta_v1.DeletePropagationBackground
+    err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy})
+    if err != nil {
+        return err
+    }
+
+    // Remove fields that should not be specified when creating a new Job
+    job.ObjectMeta.ResourceVersion = ""
+    job.ObjectMeta.UID = ""
+    job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
+    job.Status = batchv1.JobStatus{}
+
+    // Remove problematic labels
+    delete(job.Spec.Template.Labels, "controller-uid")
+    delete(job.Spec.Template.Labels, batchv1.ControllerUidLabel)
+    delete(job.Spec.Template.Labels, batchv1.JobNameLabel)
+    delete(job.Spec.Template.Labels, "job-name")
+
+    // Remove the selector to allow it to be auto-generated
+    job.Spec.Selector = nil
+
+    // Create the new job with same spec
+    _, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
+    return err
+}
+
 // UpdateDaemonSet performs rolling upgrade on daemonSet
 func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
     daemonSet := resource.(*appsv1.DaemonSet)
@@ -329,11 +402,15 @@ func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource run

 // UpdateRollout performs rolling upgrade on rollout
 func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
+    var err error
     rollout := resource.(*argorolloutv1alpha1.Rollout)
-    rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
-    logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
-    logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
-    _, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
+    strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
+    switch options.ToArgoRolloutStrategy(strategy) {
+    case options.RestartStrategy:
+        _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
+    case options.RolloutStrategy:
+        _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
+    }
     return err
 }
@@ -347,6 +424,11 @@ func GetCronJobVolumes(item runtime.Object) []v1.Volume {
     return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes
 }

+// GetJobVolumes returns the Volumes of given job
+func GetJobVolumes(item runtime.Object) []v1.Volume {
+    return item.(*batchv1.Job).Spec.Template.Spec.Volumes
+}
+
 // GetDaemonSetVolumes returns the Volumes of given daemonSet
 func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
     return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
```
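Since a Job's pod template is immutable once created, the `ReCreateJobFromjob` callback above deletes the Job and recreates it rather than patching it in place. A sketch of a Job that would opt in to this behavior (name and image are hypothetical; `foo-configmap` follows the README's running example):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: example-job            # hypothetical name
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: job
          image: busybox:1.36  # hypothetical image
          envFrom:
            - configMapRef:
                name: foo-configmap
```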
524
internal/pkg/callbacks/rolling_upgrade_test.go
Normal file
524
internal/pkg/callbacks/rolling_upgrade_test.go
Normal file
@@ -0,0 +1,524 @@
|
||||
package callbacks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
)
|
||||
|
||||
var (
|
||||
clients = setupTestClients()
|
||||
)
|
||||
|
||||
type testFixtures struct {
|
||||
defaultContainers []v1.Container
|
||||
defaultInitContainers []v1.Container
|
||||
defaultVolumes []v1.Volume
|
||||
namespace string
|
||||
}
|
||||
|
||||
func newTestFixtures() testFixtures {
|
||||
return testFixtures{
|
||||
defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
|
||||
defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}},
|
||||
defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}},
|
||||
namespace: "default",
|
||||
}
|
||||
}
|
||||
|
||||
func setupTestClients() kube.Clients {
|
||||
return kube.Clients{
|
||||
KubernetesClient: fake.NewSimpleClientset(),
|
||||
ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateRollout test update rollout strategy annotation
|
||||
func TestUpdateRollout(t *testing.T) {
|
||||
namespace := "test-ns"
|
||||
|
||||
cases := map[string]struct {
|
||||
name string
|
||||
strategy string
|
||||
isRestart bool
|
||||
}{
|
||||
"test-without-strategy": {
|
||||
name: "defaults to rollout strategy",
|
||||
strategy: "",
|
||||
isRestart: false,
|
||||
},
|
||||
"test-with-restart-strategy": {
|
||||
name: "triggers a restart strategy",
|
||||
strategy: "restart",
|
||||
isRestart: true,
|
||||
},
|
||||
"test-with-rollout-strategy": {
|
||||
name: "triggers a rollout strategy",
|
||||
strategy: "rollout",
|
||||
isRestart: false,
|
||||
},
|
||||
}
|
||||
for name, tc := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
rollout, err := testutil.CreateRollout(
|
||||
clients.ArgoRolloutClient, name, namespace,
|
||||
map[string]string{options.RolloutStrategyAnnotation: tc.strategy},
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("creating rollout: %v", err)
|
||||
}
|
||||
modifiedChan := watchRollout(rollout.Name, namespace)
|
||||
|
||||
err = callbacks.UpdateRollout(clients, namespace, rollout)
|
||||
if err != nil {
|
||||
t.Errorf("updating rollout: %v", err)
|
||||
}
|
||||
rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
|
||||
namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("getting rollout: %v", err)
|
||||
}
|
||||
if isRestartStrategy(rollout) == tc.isRestart {
|
||||
t.Errorf("Should not be a restart strategy")
|
||||
}
|
||||
select {
|
||||
case <-modifiedChan:
|
||||
// object has been modified
|
||||
case <-time.After(1 * time.Second):
|
||||
t.Errorf("Rollout has not been updated")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceItems(t *testing.T) {
|
||||
fixtures := newTestFixtures()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
createFunc func(kube.Clients, string) error
|
||||
getItemsFunc func(kube.Clients, string) []runtime.Object
|
||||
expectedCount int
|
||||
}{
|
||||
{
|
||||
name: "Deployments",
|
||||
createFunc: createTestDeployments,
|
||||
getItemsFunc: callbacks.GetDeploymentItems,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "CronJobs",
|
||||
createFunc: createTestCronJobs,
|
||||
getItemsFunc: callbacks.GetCronJobItems,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "Jobs",
|
||||
createFunc: createTestJobs,
|
||||
getItemsFunc: callbacks.GetJobItems,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "DaemonSets",
|
||||
createFunc: createTestDaemonSets,
|
||||
getItemsFunc: callbacks.GetDaemonSetItems,
|
||||
expectedCount: 2,
|
||||
},
|
||||
{
|
||||
name: "StatefulSets",
|
||||
createFunc: createTestStatefulSets,
|
||||
getItemsFunc: callbacks.GetStatefulSetItems,
|
||||
expectedCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.createFunc(clients, fixtures.namespace)
|
||||
assert.NoError(t, err)
|
||||
|
||||
items := tt.getItemsFunc(clients, fixtures.namespace)
|
||||
assert.Equal(t, tt.expectedCount, len(items))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAnnotations(t *testing.T) {
|
||||
testAnnotations := map[string]string{"version": "1"}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
resource runtime.Object
|
||||
getFunc func(runtime.Object) map[string]string
|
||||
}{
|
||||
{"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations},
|
||||
{"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations},
|
||||
{"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations},
|
||||
{"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations},
|
||||
{"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations},
|
||||
{"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetRolloutAnnotations},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPodAnnotations(t *testing.T) {
|
||||
testAnnotations := map[string]string{"version": "1"}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
resource runtime.Object
|
||||
getFunc func(runtime.Object) map[string]string
|
||||
}{
|
||||
{"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations},
|
||||
{"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations},
|
||||
{"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations},
|
||||
{"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations},
|
||||
{"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations},
|
||||
{"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetContainers(t *testing.T) {
|
||||
fixtures := newTestFixtures()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
resource runtime.Object
|
||||
getFunc func(runtime.Object) []v1.Container
|
||||
}{
|
||||
{"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers},
|
||||
{"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers},
|
||||
{"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers},
|
||||
{"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), callbacks.GetCronJobContainers},
|
||||
{"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers},
|
||||
{"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInitContainers(t *testing.T) {
|
||||
fixtures := newTestFixtures()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
resource runtime.Object
|
||||
getFunc func(runtime.Object) []v1.Container
|
||||
}{
|
||||
{"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers},
|
||||
{"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers},
|
||||
{"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers},
|
||||
{"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers},
|
||||
{"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers},
|
||||
{"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateResources(t *testing.T) {
	fixtures := newTestFixtures()

	tests := []struct {
		name       string
		createFunc func(kube.Clients, string, string) (runtime.Object, error)
		updateFunc func(kube.Clients, string, runtime.Object) error
	}{
		{"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
		{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
		{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resource, err := tt.createFunc(clients, fixtures.namespace, "1")
			assert.NoError(t, err)

			err = tt.updateFunc(clients, fixtures.namespace, resource)
			assert.NoError(t, err)
		})
	}
}
func TestCreateJobFromCronjob(t *testing.T) {
	fixtures := newTestFixtures()

	cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
	assert.NoError(t, err)

	err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
	assert.NoError(t, err)
}

func TestReCreateJobFromJob(t *testing.T) {
	fixtures := newTestFixtures()

	job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1")
	assert.NoError(t, err)

	err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
	assert.NoError(t, err)
}
func TestGetVolumes(t *testing.T) {
	fixtures := newTestFixtures()

	tests := []struct {
		name     string
		resource runtime.Object
		getFunc  func(runtime.Object) []v1.Volume
	}{
		{"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes},
		{"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes},
		{"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes},
		{"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes},
		{"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource))
		})
	}
}
// Helper functions

// isRestartStrategy reports whether the rollout has no RestartAt timestamp set.
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
	return rollout.Spec.RestartAt == nil
}
// watchRollout starts a short-lived watch on Rollouts in the given namespace and
// returns a channel that is signalled once the named rollout is modified.
func watchRollout(name, namespace string) chan interface{} {
	timeOut := int64(1)
	modifiedChan := make(chan interface{})
	watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
	go watchModified(watcher, name, modifiedChan)
	return modifiedChan
}

// watchModified signals modifiedChan on the first Modified event seen for the
// named rollout, then returns; any other event type for that name ends the watch silently.
func watchModified(watcher watch.Interface, name string, modifiedChan chan interface{}) {
	for event := range watcher.ResultChan() {
		item := event.Object.(*argorolloutv1alpha1.Rollout)
		if item.Name == name {
			switch event.Type {
			case watch.Modified:
				modifiedChan <- nil
			}
			return
		}
	}
}
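A test that has just updated a watched ConfigMap or Secret can block on the channel watchRollout returns. A minimal usage sketch with hypothetical names and an illustrative timeout (not part of the original test file):

	modified := watchRollout("test-rollout", fixtures.namespace) // hypothetical rollout name
	select {
	case <-modified:
		// Reloader modified the rollout as expected
	case <-time.After(10 * time.Second): // illustrative grace period
		t.Errorf("rollout was not modified in time")
	}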
func createTestDeployments(clients kube.Clients, namespace string) error {
	for i := 1; i <= 2; i++ {
		_, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false)
		if err != nil {
			return err
		}
	}
	return nil
}

func createTestCronJobs(clients kube.Clients, namespace string) error {
	for i := 1; i <= 2; i++ {
		_, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
		if err != nil {
			return err
		}
	}
	return nil
}

func createTestJobs(clients kube.Clients, namespace string) error {
	for i := 1; i <= 2; i++ {
		_, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
		if err != nil {
			return err
		}
	}
	return nil
}

func createTestDaemonSets(clients kube.Clients, namespace string) error {
	for i := 1; i <= 2; i++ {
		_, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
		if err != nil {
			return err
		}
	}
	return nil
}

func createTestStatefulSets(clients kube.Clients, namespace string) error {
	for i := 1; i <= 2; i++ {
		_, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
		if err != nil {
			return err
		}
	}
	return nil
}
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
	switch v := obj.(type) {
	case *appsv1.Deployment:
		v.Spec.Template.ObjectMeta.Annotations = annotations
	case *appsv1.DaemonSet:
		v.Spec.Template.ObjectMeta.Annotations = annotations
	case *appsv1.StatefulSet:
		v.Spec.Template.ObjectMeta.Annotations = annotations
	case *batchv1.CronJob:
		v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
	case *batchv1.Job:
		v.Spec.Template.ObjectMeta.Annotations = annotations
	case *argorolloutv1alpha1.Rollout:
		v.Spec.Template.ObjectMeta.Annotations = annotations
	}
	return obj
}

func createResourceWithContainers(obj runtime.Object, containers []v1.Container) runtime.Object {
	switch v := obj.(type) {
	case *appsv1.Deployment:
		v.Spec.Template.Spec.Containers = containers
	case *appsv1.DaemonSet:
		v.Spec.Template.Spec.Containers = containers
	case *appsv1.StatefulSet:
		v.Spec.Template.Spec.Containers = containers
	case *batchv1.CronJob:
		v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers
	case *batchv1.Job:
		v.Spec.Template.Spec.Containers = containers
	case *argorolloutv1alpha1.Rollout:
		v.Spec.Template.Spec.Containers = containers
	}
	return obj
}
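These builders mutate the typed object and hand it back as a plain runtime.Object, which is what lets the table-driven tests above treat every workload type uniformly. Covering a further type would take one more switch case and one more table row; a purely hypothetical sketch (neither ReplicaSet support nor callbacks.GetReplicaSetContainers exists in this change):

	// hypothetical extra case inside createResourceWithContainers:
	case *appsv1.ReplicaSet:
		v.Spec.Template.Spec.Containers = containers

	// hypothetical extra row in the TestGetContainers table:
	{"ReplicaSet", createResourceWithContainers(&appsv1.ReplicaSet{}, fixtures.defaultContainers), callbacks.GetReplicaSetContainers},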
func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object {
	switch v := obj.(type) {
	case *appsv1.Deployment:
		v.Spec.Template.Spec.InitContainers = initContainers
	case *appsv1.DaemonSet:
		v.Spec.Template.Spec.InitContainers = initContainers
	case *appsv1.StatefulSet:
		v.Spec.Template.Spec.InitContainers = initContainers
	case *batchv1.CronJob:
		v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers
	case *batchv1.Job:
		v.Spec.Template.Spec.InitContainers = initContainers
	case *argorolloutv1alpha1.Rollout:
		v.Spec.Template.Spec.InitContainers = initContainers
	}
	return obj
}

func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object {
	switch v := obj.(type) {
	case *appsv1.Deployment:
		v.Spec.Template.Spec.Volumes = volumes
	case *batchv1.CronJob:
		v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes
	case *batchv1.Job:
		v.Spec.Template.Spec.Volumes = volumes
	case *appsv1.DaemonSet:
		v.Spec.Template.Spec.Volumes = volumes
	case *appsv1.StatefulSet:
		v.Spec.Template.Spec.Volumes = volumes
	}
	return obj
}
func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-deployment",
			Namespace:   namespace,
			Annotations: map[string]string{"version": version},
		},
	}
	return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}

func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
	daemonSet := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-daemonset",
			Namespace:   namespace,
			Annotations: map[string]string{"version": version},
		},
	}
	return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}

func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-statefulset",
			Namespace:   namespace,
			Annotations: map[string]string{"version": version},
		},
	}
	return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}

func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
	cronJob := &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-cronjob",
			Namespace:   namespace,
			Annotations: map[string]string{"version": version},
		},
	}
	return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}

func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-job",
			Namespace:   namespace,
			Annotations: map[string]string{"version": version},
		},
	}
	return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
@@ -55,6 +55,20 @@ func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
 	}
 }
 
+// GetJobCreateJobFuncs returns all callback funcs for a job
+func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
+	return callbacks.RollingUpgradeFuncs{
+		ItemsFunc:          callbacks.GetJobItems,
+		AnnotationsFunc:    callbacks.GetJobAnnotations,
+		PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
+		ContainersFunc:     callbacks.GetJobContainers,
+		InitContainersFunc: callbacks.GetJobInitContainers,
+		UpdateFunc:         callbacks.ReCreateJobFromjob,
+		VolumesFunc:        callbacks.GetJobVolumes,
+		ResourceType:       "Job",
+	}
+}
+
 // GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
 func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	return callbacks.RollingUpgradeFuncs{
@@ -153,6 +167,10 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
 	if err != nil {
 		return err
 	}
+	err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
+	if err != nil {
+		return err
+	}
 	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
 	if err != nil {
 		return err
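The Job step added above slots into the same sequential chain that doRollingUpgrade already runs for the other workload types, so supporting a further type stays mechanical: define its callbacks bundle, then append one more rollingUpgrade call. As a sketch, a CronJob step would reuse the GetCronJobCreateJobFuncs getter named in the hunk header above (its actual call site lies outside this diff):

	err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
	if err != nil {
		return err
	}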
@@ -2,6 +2,15 @@ package options
 
 import "github.com/stakater/Reloader/internal/pkg/constants"
 
+type ArgoRolloutStrategy int
+
+const (
+	// RestartStrategy is the annotation value for restart strategy for rollouts
+	RestartStrategy ArgoRolloutStrategy = iota
+	// RolloutStrategy is the annotation value for rollout strategy for rollouts
+	RolloutStrategy
+)
+
 var (
 	// Auto reload all resources when their corresponding configmaps/secrets are updated
 	AutoReloadAll = false
@@ -27,6 +36,8 @@ var (
 	// SearchMatchAnnotation is an annotation to tag secrets to be found with
 	// AutoSearchAnnotation
 	SearchMatchAnnotation = "reloader.stakater.com/match"
+	// RolloutStrategyAnnotation is an annotation to define rollout update strategy
+	RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
 	// LogFormat is the log format to use (json, or empty string for default)
 	LogFormat = ""
 	// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -45,3 +56,14 @@ var (
 	// Url to send a request to instead of triggering a reload
 	WebhookUrl = ""
 )
+
+func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {
+	switch s {
+	case "restart":
+		return RestartStrategy
+	case "rollout":
+		fallthrough
+	default:
+		return RolloutStrategy
+	}
+}
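Callers can resolve the typed strategy for a workload from its annotations with the new helper; empty or unrecognized values deliberately fall back to RolloutStrategy. A minimal illustrative sketch (the rollout variable and surrounding plumbing are assumed, not part of this change):

	strategy := options.ToArgoRolloutStrategy(rollout.Annotations[options.RolloutStrategyAnnotation])
	if strategy == options.RestartStrategy {
		// restart the workload in place instead of patching its pod template
	}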
@@ -10,6 +10,8 @@ import (
 	"strings"
 	"time"
 
+	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+	argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
 	openshiftv1 "github.com/openshift/api/apps/v1"
 	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
 	"github.com/sirupsen/logrus"
@@ -21,6 +23,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
 	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,16 +72,16 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) {
 	}
 }
 
-func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) metav1.ObjectMeta {
+func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta {
 	return metav1.ObjectMeta{
 		Name:        name,
 		Namespace:   namespace,
 		Labels:      map[string]string{"firstLabel": "temp"},
-		Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload),
+		Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations),
 	}
 }
 
-func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) map[string]string {
+func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string {
 	annotations := make(map[string]string)
 	if autoReload {
 		annotations[options.ReloaderAutoAnnotation] = "true"
@@ -90,13 +93,15 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
 		annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
 	}
 
-	if len(annotations) > 0 {
-		return annotations
-	} else {
-		return map[string]string{
+	if !(len(annotations) > 0) {
+		annotations = map[string]string{
 			options.ConfigmapUpdateOnChangeAnnotation: name,
 			options.SecretUpdateOnChangeAnnotation:    name}
 	}
+	for k, v := range extraAnnotations {
+		annotations[k] = v
+	}
+	return annotations
 }
 
 func getEnvVarSources(name string) []v1.EnvFromSource {
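The reworked getAnnotations keeps the old fallback — with no auto-reload flag set it still seeds the update-on-change pair keyed by name — and then merges extraAnnotations last, so extras can add to or override the defaults. An illustrative call, traced from the logic above (keys shown symbolically, not captured output):

	a := getAnnotations("my-config", false, false, false,
		map[string]string{options.RolloutStrategyAnnotation: "restart"})
	// a now holds:
	//   options.ConfigmapUpdateOnChangeAnnotation -> "my-config"
	//   options.SecretUpdateOnChangeAnnotation    -> "my-config"
	//   options.RolloutStrategyAnnotation         -> "restart"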
@@ -173,7 +178,7 @@ func getVolumes(name string) []v1.Volume {
 	}
 }
 
-func getVolumeMounts(name string) []v1.VolumeMount {
+func getVolumeMounts() []v1.VolumeMount {
 	return []v1.VolumeMount{
 		{
 			MountPath: "etc/config",
@@ -271,7 +276,7 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
 						Value: "test",
 					},
 				},
-				VolumeMounts: getVolumeMounts(name),
+				VolumeMounts: getVolumeMounts(),
 			},
 		},
 		Volumes: getVolumes(name),
@@ -289,7 +294,7 @@ func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
 			{
 				Image:        "busybox",
 				Name:         "busyBox",
-				VolumeMounts: getVolumeMounts(name),
+				VolumeMounts: getVolumeMounts(),
 			},
 		},
 		Containers: []v1.Container{
@@ -342,7 +347,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
 func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
 	replicaset := int32(1)
 	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -361,7 +366,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
 	replicaset := int32(1)
 	podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
 	return &openshiftv1.DeploymentConfig{
-		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
 		Spec: openshiftv1.DeploymentConfigSpec{
 			Replicas: replicaset,
 			Strategy: openshiftv1.DeploymentStrategy{
@@ -376,7 +381,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *ap
 func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
 	replicaset := int32(1)
 	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -394,7 +399,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName strin
 func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
 	replicaset := int32(1)
 	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -411,7 +416,7 @@ func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.D
 func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
 	replicaset := int32(1)
 	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -429,7 +434,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
 	replicaset := int32(1)
 	podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
 	return &openshiftv1.DeploymentConfig{
-		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
 		Spec: openshiftv1.DeploymentConfigSpec{
 			Replicas: replicaset,
 			Strategy: openshiftv1.DeploymentStrategy{
@@ -443,7 +448,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
 func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
 	replicaset := int32(1)
 	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -460,7 +465,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
 func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
 	replicaset := int32(1)
 	deployment := &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
 		Spec: appsv1.DeploymentSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -475,7 +480,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
 	if !both {
 		deployment.ObjectMeta.Annotations = nil
 	}
-	deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false)
+	deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
 	return deployment
 }
@@ -483,9 +488,9 @@ func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName strin
 	replicaset := int32(1)
 	var objectMeta metav1.ObjectMeta
 	if resourceType == SecretResourceType {
-		objectMeta = getObjectMeta(namespace, deploymentName, false, true, false)
+		objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
 	} else if resourceType == ConfigmapResourceType {
-		objectMeta = getObjectMeta(namespace, deploymentName, false, false, true)
+		objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
 	}
 
 	return &appsv1.Deployment{
@@ -537,7 +542,7 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string,
 // GetDaemonSet provides daemonset for testing
 func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
 	return &appsv1.DaemonSet{
-		ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}),
 		Spec: appsv1.DaemonSetSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -552,7 +557,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
 
 func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
 	return &appsv1.DaemonSet{
-		ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false),
+		ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}),
 		Spec: appsv1.DaemonSetSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -568,7 +573,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.Dae
 // GetStatefulSet provides statefulset for testing
 func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
 	return &appsv1.StatefulSet{
-		ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false),
+		ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}),
 		Spec: appsv1.StatefulSetSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -584,7 +589,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSe
 // GetStatefulSet provides statefulset for testing
 func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
 	return &appsv1.StatefulSet{
-		ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false),
+		ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}),
 		Spec: appsv1.StatefulSetSpec{
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -633,6 +638,64 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret {
 	}
 }
 
+func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
+	return &batchv1.CronJob{
+		ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
+		Spec: batchv1.CronJobSpec{
+			Schedule: "*/5 * * * *", // Run every 5 minutes
+			JobTemplate: batchv1.JobTemplateSpec{
+				Spec: batchv1.JobSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"secondLabel": "temp"},
+					},
+					Template: getPodTemplateSpecWithVolumes(cronJobName),
+				},
+			},
+		},
+	}
+}
+
+func GetJob(namespace string, jobName string) *batchv1.Job {
+	return &batchv1.Job{
+		ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}),
+		Spec: batchv1.JobSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Template: getPodTemplateSpecWithVolumes(jobName),
+		},
+	}
+}
+
+func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {
+	return &batchv1.CronJob{
+		ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}),
+		Spec: batchv1.CronJobSpec{
+			Schedule: "*/5 * * * *", // Run every 5 minutes
+			JobTemplate: batchv1.JobTemplateSpec{
+				Spec: batchv1.JobSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"secondLabel": "temp"},
+					},
+					Template: getPodTemplateSpecWithEnvVars(cronJobName),
+				},
+			},
+		},
+	}
+}
+
+func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
+	return &batchv1.Job{
+		ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}),
+		Spec: batchv1.JobSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Template: getPodTemplateSpecWithEnvVars(jobName),
+		},
+	}
+}
+
 // GetSecretWithUpdatedLabel provides secret for testing
 func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
 	return &v1.Secret{
@@ -843,6 +906,36 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
 	return statefulset, err
 }
 
+// CreateCronJob creates a cronjob in given namespace and returns the CronJob
+func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) {
+	logrus.Infof("Creating CronJob")
+	cronJobClient := client.BatchV1().CronJobs(namespace)
+	var cronJobObj *batchv1.CronJob
+	if volumeMount {
+		cronJobObj = GetCronJob(namespace, cronJobName)
+	} else {
+		cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName)
+	}
+	cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return cronJob, err
+}
+
+// CreateJob creates a job in given namespace and returns the Job
+func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) {
+	logrus.Infof("Creating Job")
+	jobClient := client.BatchV1().Jobs(namespace)
+	var jobObj *batchv1.Job
+	if volumeMount {
+		jobObj = GetJob(namespace, jobName)
+	} else {
+		jobObj = GetJobWithEnvVar(namespace, jobName)
+	}
+	job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return job, err
+}
+
 // DeleteDeployment deletes a deployment in given namespace and returns the error if any
 func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
 	logrus.Infof("Deleting Deployment")
@@ -1071,3 +1164,28 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
 func GetSHAfromEmptyData() string {
 	return crypto.GenerateSHA("")
 }
+
+// GetRollout provides rollout for testing
+func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout {
+	replicaset := int32(1)
+	return &argorolloutv1alpha1.Rollout{
+		ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations),
+		Spec: argorolloutv1alpha1.RolloutSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Replicas: &replicaset,
+			Template: getPodTemplateSpecWithVolumes(rolloutName),
+		},
+	}
+}
+
+// CreateRollout creates a rollout in given namespace and returns the Rollout
+func CreateRollout(client argorollout.Interface, rolloutName string, namespace string, annotations map[string]string) (*argorolloutv1alpha1.Rollout, error) {
+	logrus.Infof("Creating Rollout")
+	rolloutClient := client.ArgoprojV1alpha1().Rollouts(namespace)
+	rolloutObj := GetRollout(namespace, rolloutName, annotations)
+	rollout, err := rolloutClient.Create(context.TODO(), rolloutObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return rollout, err
+}
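Putting the new pieces together, an end-to-end rollout test can be sketched roughly as follows (the names, namespace, and the update step are hypothetical; CreateRollout and watchRollout are the helpers defined above):

	annotations := map[string]string{options.RolloutStrategyAnnotation: "rollout"}
	rollout, err := testutil.CreateRollout(clients.ArgoRolloutClient, "test-rollout", namespace, annotations)
	if err != nil {
		t.Fatal(err)
	}
	modified := watchRollout(rollout.Name, namespace)
	// ...update the ConfigMap or Secret the rollout references...
	<-modified // blocks until Reloader modifies the rollout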
Submodule theme_common updated: 0eef29f4cb...11286e112e