Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)
Compare commits
66 Commits
| SHA1 |
|---|
| 5185ff2c91 |
| d6a95a923a |
| 3dc3b4726c |
| 1b2780f712 |
| 574129d637 |
| 919d0cc3ca |
| ca3c95404e |
| 9ded29a9aa |
| 37e6c6b7c8 |
| 1d923bce36 |
| 636ab7a3f5 |
| 41802adc52 |
| e3d352cf56 |
| dd7d35e268 |
| cb769d0f64 |
| 234084a042 |
| c341a8bb97 |
| 6aff0b9c79 |
| 9fc4f5bcf7 |
| d45054ba5a |
| 5246ec70e7 |
| 33f28ec1e3 |
| ee8ff2d413 |
| 4de1659965 |
| 277dde8525 |
| 0e4db821d9 |
| 565a3d6916 |
| 04d72d0b70 |
| 0a2aa15d1f |
| 035fa66692 |
| d99a510628 |
| 266bad4c7a |
| bd54311ac2 |
| 7ddb1ae434 |
| 1c794b911b |
| e2592ddb35 |
| fabb83a422 |
| ecdfc6d751 |
| 145679a1fb |
| bce0ac9aa6 |
| 78bb519058 |
| 3f5ee46f00 |
| d4acec63b7 |
| 0d464cff65 |
| eb42fce5a8 |
| a39100ab35 |
| bf6360752d |
| 38ab09a5af |
| ca09e243a3 |
| 22f6c3e461 |
| d784b552ee |
| 33710457ef |
| f3bf76bb9d |
| 26ce083053 |
| e9811bf166 |
| 93e7aca146 |
| ff7c5c0f74 |
| 1ffef1a1d4 |
| c9b919f2f4 |
| 2cd4f2397a |
| e2edc87812 |
| 394707a7f8 |
| 71d6c4bd07 |
| 4b3a58d91e |
| 0d6d5ca479 |
| 017e6ed7fd |
.github/workflows/init-branch-release.yaml (vendored, 3 changes)

@@ -48,7 +48,6 @@ jobs:
         run: |
           set -ue
           VERSION=${{ inputs.TARGET_VERSION }} make update-manifests-version
           VERSION=${{ inputs.TARGET_VERSION }} make bump-chart
-          git diff
 
       - name: Generate new set of manifests
@@ -58,7 +57,7 @@ jobs:
           git diff
 
       - name: Create pull request
-        uses: peter-evans/create-pull-request@v7.0.5
+        uses: peter-evans/create-pull-request@v7.0.6
         with:
           commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
           title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
.github/workflows/pull_request-helm.yaml (vendored, new file, 90 lines)

@@ -0,0 +1,90 @@
+name: Pull Request Workflow for Helm Chart changes
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - 'deployments/kubernetes/chart/reloader/**'
+
+env:
+  DOCKER_FILE_PATH: Dockerfile
+  DOCKER_UBI_FILE_PATH: Dockerfile.ubi
+  KUBERNETES_VERSION: "1.30.0"
+  KIND_VERSION: "0.23.0"
+  REGISTRY: ghcr.io
+
+jobs:
+  helm-chart-validation:
+    permissions:
+      contents: read
+    runs-on: ubuntu-latest
+    name: Helm Chart Validation
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{github.event.pull_request.head.sha}}
+          fetch-depth: 0
+
+      # Setting up helm binary
+      - name: Set up Helm
+        uses: azure/setup-helm@v4
+        with:
+          version: v3.11.3
+
+      - name: Helm chart unit tests
+        uses: d3adb5/helm-unittest-action@v2
+        with:
+          charts: deployments/kubernetes/chart/reloader
+
+  helm-version-validation:
+    needs: helm-chart-validation
+    permissions:
+      contents: read
+    runs-on: ubuntu-latest
+    name: Helm Version Validation
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'release/helm-chart') }}
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{github.event.pull_request.head.sha}}
+          fetch-depth: 0
+
+      - name: Add Stakater Helm Repo
+        run: |
+          helm repo add stakater https://stakater.github.io/stakater-charts
+
+      - name: Get version for chart from helm repo
+        id: chart_eval
+        run: |
+          current_chart_version=$(helm search repo stakater/reloader | tail -n 1 | awk '{print $2}')
+          echo "CURRENT_CHART_VERSION=$(echo ${current_chart_version})" >> $GITHUB_OUTPUT
+
+      - name: Get Updated Chart version from Chart.yaml
+        uses: mikefarah/yq@master
+        id: new_chart_version
+        with:
+          cmd: yq e '.version' deployments/kubernetes/chart/reloader/Chart.yaml
+
+      - name: Check Version
+        uses: aleoyakas/check-semver-increased-action@v1
+        id: check-version
+        with:
+          current-version: ${{ steps.new_chart_version.outputs.result }}
+          previous-version: ${{ steps.chart_eval.outputs.CURRENT_CHART_VERSION }}
+
+      - name: Fail if Helm Chart version isnt updated
+        if: steps.check-version.outputs.is-version-increased != 'true'
+        run: |
+          echo "Helm Chart Version wasnt updated"
+          exit 1
.github/workflows/pull_request.yaml (vendored, 74 changes)

@@ -1,4 +1,4 @@
-name: Pull Request
+name: Pull Request Workflow for Code changes
 
 on:
   pull_request:
@@ -14,6 +14,7 @@ on:
       - '!docs/**'
      - '!theme_common'
      - '!theme_override'
+      - '!deployments/kubernetes/chart/reloader/**'
 
 env:
   DOCKER_FILE_PATH: Dockerfile
@@ -24,7 +25,7 @@ env:
 
 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.98
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.122
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: README.md
@@ -162,72 +163,3 @@ jobs:
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
             org.opencontainers.image.revision=${{ github.sha }}
-
-      - name: Check if Helm validation needs to run
-        uses: dorny/paths-filter@v3
-        id: filter
-        with:
-          filters: |
-            chart:
-              - 'deployments/kubernetes/chart/reloader/**'
-
-    outputs:
-      helm_chart_changed: ${{ steps.filter.outputs.chart }}
-
-  helm-validation:
-    permissions:
-      contents: read
-    runs-on: ubuntu-latest
-    name: Helm Chart Validation
-    needs:
-      - build
-    if: ${{ needs.build.outputs.helm_chart_changed }} == "true"
-    steps:
-
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{github.event.pull_request.head.sha}}
-          fetch-depth: 0
-
-      # Setting up helm binary
-      - name: Set up Helm
-        uses: azure/setup-helm@v4
-        with:
-          version: v3.11.3
-
-      - name: Helm chart unit tests
-        uses: d3adb5/helm-unittest-action@v2
-        with:
-          charts: deployments/kubernetes/chart/reloader
-
-      - name: Add Stakater Helm Repo
-        run: |
-          helm repo add stakater https://stakater.github.io/stakater-charts
-
-      - name: Get version for chart from helm repo
-        id: chart_eval
-        run: |
-          current_chart_version=$(helm search repo stakater/reloader | tail -n 1 | awk '{print $2}')
-          echo "CURRENT_CHART_VERSION=$(echo ${current_chart_version})" >> $GITHUB_OUTPUT
-
-      - name: Get Updated Chart version from Chart.yaml
-        uses: mikefarah/yq@master
-        id: new_chart_version
-        with:
-          cmd: yq e '.version' deployments/kubernetes/chart/reloader/Chart.yaml
-
-      - name: Check Version
-        uses: aleoyakas/check-semver-increased-action@v1
-        id: check-version
-        with:
-          current-version: ${{ steps.new_chart_version.outputs.result }}
-          previous-version: ${{ steps.chart_eval.outputs.CURRENT_CHART_VERSION }}
-
-      - name: Fail if Helm Chart version isnt updated
-        if: steps.check-version.outputs.is-version-increased != 'true'
-        run: |
-          echo "Helm Chart Version wasnt updated"
-          exit 1
.github/workflows/pull_request_docs.yaml (vendored, 11 changes)

@@ -15,8 +15,17 @@ on:
 
 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.98
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.122
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: docs
       MD_LINT_CONFIG: .markdownlint.yaml
+  build:
+    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.122
+    with:
+      DOCKER_FILE_PATH: Dockerfile-docs
+      CONTAINER_REGISTRY_URL: ghcr.io/stakater
+    secrets:
+      CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
+      CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
+      SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
.github/workflows/push-helm-chart.yaml (vendored, new file, 106 lines)

@@ -0,0 +1,106 @@
+name: Push Helm Chart
+
+on:
+  pull_request:
+    types:
+      - closed
+    branches:
+      - master
+    paths:
+      - 'deployments/kubernetes/chart/reloader/**'
+      - '.github/workflows/push-helm-chart.yaml'
+
+env:
+  HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
+  REGISTRY: ghcr.io
+
+jobs:
+  verify-and-push-helm-chart:
+    permissions:
+      contents: read
+      packages: write # to push artifacts to `ghcr.io`
+    name: Verify and Push Helm Chart
+    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'release/helm-chart')) }}
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          token: ${{ secrets.PUBLISH_TOKEN }}
+          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
+          submodules: recursive
+
+      # Setting up helm binary
+      - name: Set up Helm
+        uses: azure/setup-helm@v4
+        with:
+          version: v3.11.3
+
+      - name: Add Stakater Helm Repo
+        run: |
+          helm repo add stakater https://stakater.github.io/stakater-charts
+
+      - name: Get version for chart from helm repo
+        id: chart_eval
+        run: |
+          current_chart_version=$(helm search repo stakater/reloader | tail -n 1 | awk '{print $2}')
+          echo "CURRENT_CHART_VERSION=$(echo ${current_chart_version})" >> $GITHUB_OUTPUT
+
+      - name: Get Updated Chart version from Chart.yaml
+        uses: mikefarah/yq@master
+        id: new_chart_version
+        with:
+          cmd: yq e '.version' deployments/kubernetes/chart/reloader/Chart.yaml
+
+      - name: Check Version
+        uses: aleoyakas/check-semver-increased-action@v1
+        id: check-version
+        with:
+          current-version: ${{ steps.new_chart_version.outputs.result }}
+          previous-version: ${{ steps.chart_eval.outputs.CURRENT_CHART_VERSION }}
+
+      - name: Fail if Helm Chart version isnt updated
+        if: steps.check-version.outputs.is-version-increased != 'true'
+        run: |
+          echo "Helm Chart Version wasnt updated"
+          exit 1
+
+      - name: Login to GHCR Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: stakater-user
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Publish Helm chart to ghcr.io
+        run: |
+          helm package ./deployments/kubernetes/chart/reloader --destination ./packaged-chart
+          helm push ./packaged-chart/*.tgz oci://ghcr.io/stakater/charts
+          rm -rf ./packaged-chart
+
+      - name: Publish Helm chart to gh-pages
+        uses: stefanprodan/helm-gh-pages@master
+        with:
+          branch: master
+          repository: stakater-charts
+          target_dir: docs
+          token: ${{ secrets.GHCR_TOKEN }}
+          charts_dir: deployments/kubernetes/chart/
+          charts_url: ${{ env.HELM_REGISTRY_URL }}
+          owner: stakater
+          linting: on
+          commit_username: stakater-user
+          commit_email: stakater@gmail.com
+
+      - name: Notify Slack
+        uses: 8398a7/action-slack@v3
+        if: always() # Pick up events even if the job fails or is canceled.
+        with:
+          status: ${{ job.status }}
+          fields: repo,author,action,eventName,ref,workflow
+        env:
+          GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
+          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
.github/workflows/push-pr-image.yaml (vendored, 9 changes)

@@ -5,6 +5,15 @@ on:
     branches:
       - master
     types: [ labeled ]
+    paths:
+      - '!.markdownlint.yaml'
+      - '!.vale.ini'
+      - '!Dockerfile-docs'
+      - '!docs-nginx.conf'
+      - '!docs/**'
+      - '!theme_common'
+      - '!theme_override'
+      - '!deployments/kubernetes/chart/reloader/**'
 
 env:
   DOCKER_FILE_PATH: Dockerfile
.github/workflows/push.yaml (vendored, 1 change)

@@ -200,7 +200,6 @@ jobs:
           push: true
           build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
-            ${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
           labels: |
.vale.ini

@@ -1,7 +1,7 @@
 StylesPath = styles
 MinAlertLevel = warning
 
-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.40/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.52/Stakater.zip
 Vocab = Stakater
 
 # Only check MarkDown files
Dockerfile-docs

@@ -1,4 +1,4 @@
-FROM python:3.12 as builder
+FROM python:3.13-alpine as builder
 
 # set workdir
 RUN mkdir -p $HOME/application
@@ -10,7 +10,7 @@ COPY --chown=1001:root . .
 RUN pip3 install -r theme_common/requirements.txt
 
 # Combine Theme Resources
-RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
+RUN python theme_common/scripts/combine_theme_resources.py -s theme_common/resources -ov theme_override/resources -o dist/_theme
 # Produce mkdocs file
 RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml
Makefile (2 changes)

@@ -155,7 +155,7 @@ k8s-manifests: $(KUSTOMIZE) ## Generate k8s manifests using Kustomize from 'mani
 
 .PHONY: update-manifests-version
 update-manifests-version: ## Update the image version in the generated k8s manifests
-	sed -i 's/image: "ghcr.io\/stakater\/reloader:latest"/image: \"ghcr.io\/stakater\/reloader:v$(VERSION)"/g' deployments/kubernetes/manifests/deployment.yaml
+	sed -i 's/image:.*/image: \"ghcr.io\/stakater\/reloader:v$(VERSION)"/g' deployments/kubernetes/manifests/deployment.yaml
 
 # Bump Chart
 bump-chart:
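The loosened `sed` pattern matters once the manifest no longer pins `:latest`: the old expression only matched that exact literal, while the new one rewrites whatever `image:` value is currently set. An illustrative before/after for `VERSION=1.2.1` (values assumed, not taken from the diff):

```yaml
# deployments/kubernetes/manifests/deployment.yaml (illustrative)
# before: image: "ghcr.io/stakater/reloader:v1.2.0"
# after:  image: "ghcr.io/stakater/reloader:v1.2.1"
image: "ghcr.io/stakater/reloader:v1.2.1"
```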
README.md (89 changes)

@@ -50,13 +50,13 @@ spec:
   metadata:
 ```
 
-This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
+This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
 
 You can filter it by the type of monitored resource and use typed versions of the `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add the `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use the `configmap.reloader.stakater.com/auto` annotation to look for changes in mounted `ConfigMap`s; changes in any of the mounted `Secret`s will not trigger a rolling upgrade on related pods.
 
 You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
 are tagged with a special annotation. To take advantage of that, annotate
-your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:
+your deploymentconfigs/deployments/daemonsets/statefulset/rollouts/cronjobs/jobs like this:
 
 ```yaml
 kind: Deployment
||||
@@ -91,11 +91,11 @@ not.
|
||||
|
||||
Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"` or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` will restart upon a change in a secret it uses.
|
||||
|
||||
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset` or `rollout`.
|
||||
We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset`, `rollout`, `cronJob` or `job`.
|
||||
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](.#Configmap) or [Secret](.#Secret).
|
||||
|
||||
It's also possible to enable auto reloading for all resources, by setting the `--auto-reload-all` flag.
|
||||
In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their ConfigMaps or Secrets are updated.
|
||||
In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"`, will be reloaded automatically when their Configmaps or Secrets are updated.
|
||||
Notice that setting the auto annotation to an undefined value counts as false as-well.
|
||||
|
||||
### Configmap
|
||||
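For orientation, a minimal sketch (not part of the diff) of the specific-resource annotations described above, with illustrative resource names:

```yaml
kind: Deployment
metadata:
  annotations:
    # reload only when these named resources change, regardless of the auto annotation
    configmap.reloader.stakater.com/reload: "foo-configmap"
    secret.reloader.stakater.com/reload: "foo-secret"
```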
@@ -158,7 +158,7 @@ spec:
 
 - Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
 - For [`rollouts`](https://github.com/argoproj/argo-rollouts/) Reloader simply triggers a change; it is up to you how you configure the `rollout` strategy.
-- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
+- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets/CronJobs/Jobs`
 - `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of the usage of the configmap or secret.
 - you may override the auto annotation with the `--auto-annotation` flag
 - you may override the secret typed auto annotation with the `--secret-auto-annotation` flag
@@ -173,6 +173,7 @@ spec:
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
 - you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
+- you can configure the rollout reload strategy with the `reloader.stakater.com/rollout-strategy` annotation; `restart` or `rollout` values are available (defaults to `rollout`)
 
 ## Reload Strategies
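The newly documented rollout-strategy annotation composes with auto reload; a minimal sketch on an Argo Rollout (illustrative, not part of the diff):

```yaml
kind: Rollout
metadata:
  annotations:
    reloader.stakater.com/auto: "true"
    reloader.stakater.com/rollout-strategy: "restart"  # default is "rollout"
```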
@@ -209,8 +210,8 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
 
 | Argument                           | Description          |
 |------------------------------------|----------------------|
-| --resources-to-ignore=configMaps | To ignore configMaps |
-| --resources-to-ignore=secrets | To ignore secrets |
+| `--resources-to-ignore=configMaps` | To ignore configmaps |
+| `--resources-to-ignore=secrets`    | To ignore secrets    |
 
 **Note:** Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the Reloader pods down to `0`.
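A sketch of how the flag is passed to the Reloader container (the args below are illustrative, not from the diff):

```yaml
containers:
  - name: reloader
    image: ghcr.io/stakater/reloader:v1.2.1
    args:
      - "--resources-to-ignore=secrets"  # watch configmaps only; use configMaps to do the opposite
```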
@@ -328,7 +329,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 | `reloader.isArgoRollouts` | Enable Argo `Rollouts`. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.ignoreSecrets` | To ignore secrets. Valid values are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
-| `reloader.ignoreConfigMaps` | To ignore configMaps. Valid values are either `true` or `false` | boolean | `false` |
+| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.reloadOnCreate` | Enable reload on create events. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are either `true` or `false` | boolean | `false` |
 | `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid values are either `true` or `false` | boolean | `false` |
@@ -346,25 +347,25 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 
 #### Deployment Reloader Parameters
 
 | Parameter | Description | Type | Default |
 |-------------------------------------------------|--------------------------------------------------------|--------|-------------------|
-| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true` | int | 1 |
+| `reloader.deployment.replicas` | Number of replicas, if you wish to run multiple replicas set `reloader.enableHA = true`. The replicas will be limited to 1 when `reloader.enableHA = false` | int | 1 |
 | `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
 | `reloader.deployment.nodeSelector` | Scheduling pod to a specific node based on set labels | map | `{}` |
 | `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
 | `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
 | `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
 | `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
 | `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
 | `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
 | `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
 | `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
 | `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
 | `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
 | `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
 | `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
 | `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
 | `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
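The reworded `replicas` description matches the template change further down (replicas are capped at 1 unless HA is enabled); an illustrative values snippet:

```yaml
reloader:
  enableHA: true     # required to run more than one replica
  deployment:
    replicas: 2      # capped to 1 by the chart when enableHA is false
```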
 #### Other Reloader Parameters
@@ -384,7 +385,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 
 #### Additional Remarks
 
-- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configMap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
+- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are then both conditions must be met for the configmap or secret to be eligible to trigger reload events. (e.g. If a configmap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
 - At one time only one of the resources `ignoreConfigMaps` or `ignoreSecrets` can be ignored; trying to do both will cause an error in helm template compilation
 - Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions
 - `isOpenShift`: Recent versions of OpenShift (tested on 4.13.3) require the specified user to be in a `uid` range which is dynamically assigned by the namespace. The solution is to unset the `runAsUser` variable via `deployment.securityContext.runAsUser=null` and let OpenShift assign it at install
@@ -406,7 +407,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
 
 ### Documentation
 
-You can find more documentation [here](docs)
+The Reloader documentation can be viewed from [the doc site](https://docs.stakater.com/reloader/). The doc source is in the [docs](./docs/) folder.
 
 ### Have a question?
 
@@ -414,7 +415,7 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues).
 
 ### Talk to us on Slack
 
-Join and talk to us on Slack for discussing Reloader
+Join and talk to us on Slack for discussing Reloader:
 
-[](https://slack.stakater.com/)
+[](https://stakater-community.slack.com/messages/CC5S05S12)
@@ -427,12 +428,12 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
 
 ### Developing
 
-1. Deploy Reloader.
-1. Run `okteto up` to activate your development container.
+1. Deploy Reloader
+1. Run `okteto up` to activate your development container
 1. `make build`
 1. `./Reloader`
 
-PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
+PRs are welcome. In general, we follow the "fork-and-pull" Git workflow:
 
 1. **Fork** the repo on GitHub
 1. **Clone** the project to your own machine
@@ -444,15 +445,29 @@ PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
 
 ## Release Processes
 
-_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request. When a GitHub release is made, the corresponding image is built and pushed to the registry.
-
-_Repository git tagging_: The Reloader repository is tagged on every push to main. The creation of a tag does not trigger anything else, it just acts as a pointer to a commit on main.
-
-_Helm chart versioning_: The Reloader Helm chart release process is still [work in progress](https://github.com/stakater/Reloader/issues/684). This page will be updated when the process is settled. As a heads-up, to address the issues that are inherent in the current process the chart will most probably be relocated to the [Stakater charts repository](https://github.com/stakater/charts/). This setup is common in open-source repositories. When a GitHub release has been manually created in this repository, an image will be built, and Renovate in the charts repository will update the Helm chart to use it.
+_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request.
+
+To make a GitHub release:
+
+1. Code owners create a release branch `release-vX.Y.Z`
+1. Code owners run a dispatch mode workflow to automatically generate the version and manifests on the release branch
+1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
+1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
+
+_Repository git tagging_: A push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
+
+_Helm chart versioning_: The Reloader Helm chart is maintained in [this repository](./deployments/kubernetes/chart/reloader). The Helm chart has its own semantic versioning. Helm charts and code releases are separate artifacts and separately versioned. The manifest-making strategy relies on Kustomize. The Reloader Helm chart manages the two artifacts with these two fields:
+
+- [`appVersion`](./deployments/kubernetes/chart/reloader/Chart.yaml) points to the released Reloader application image version listed on the [releases page](https://github.com/stakater/Reloader/releases)
+- [`version`](./deployments/kubernetes/chart/reloader/Chart.yaml) sets the Reloader Helm chart version
+
+The Helm chart will be released to the chart registry whenever files in `deployments/kubernetes/chart/reloader/**` change on the main branch.
+
+The Helm chart will be released by the maintainers, on labelling a PR with `release/helm-chart` after updating the `version` field in the `Chart.yaml` file.
 
 ## Changelog
 
-View our closed [Pull Requests](https://github.com/stakater/Reloader/pulls?q=is%3Apr+is%3Aclosed).
+View the [releases page](https://github.com/stakater/Reloader/releases) to see what has changed in each release.
 
 ## License
deployments/kubernetes/chart/reloader/Chart.yaml

@@ -3,8 +3,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 1.1.1
-appVersion: v1.1.0
+version: 1.2.2
+appVersion: v1.2.1
 keywords:
   - Reloader
   - kubernetes
Chart template (RBAC rules):

@@ -76,16 +76,6 @@ rules:
   - get
   - update
   - patch
-- apiGroups:
-    - "extensions"
-  resources:
-    - deployments
-    - daemonsets
-  verbs:
-    - list
-    - get
-    - update
-    - patch
 - apiGroups:
     - "batch"
   resources:
@@ -99,6 +89,9 @@ rules:
     - jobs
   verbs:
+    - create
+    - delete
     - list
     - get
 {{- if .Values.reloader.enableHA }}
 - apiGroups:
     - "coordination.k8s.io"
Chart template (Deployment):

@@ -18,7 +18,7 @@ metadata:
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
 {{- if not (.Values.reloader.enableHA) }}
-  replicas: 1
+  replicas: {{ min .Values.reloader.deployment.replicas 1 }}
 {{- else }}
   replicas: {{ .Values.reloader.deployment.replicas }}
 {{- end}}
@@ -45,7 +45,7 @@ spec:
 {{ toYaml .Values.reloader.matchLabels | indent 8 }}
 {{- end }}
     spec:
-      {{- with .Values.reloader.deployment.imagePullSecrets }}
+      {{- with .Values.global.imagePullSecrets }}
       imagePullSecrets:
       {{- toYaml . | nindent 8 }}
       {{- end }}
@@ -73,11 +73,15 @@ spec:
       priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
 {{- end }}
       containers:
-      {{- if $.Values.global.imageRegistry }}
-      - image: "{{ $.Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
-      {{- else }}
+      {{- if .Values.global.imageRegistry }}
+      - image: "{{ .Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
+      {{- else }}
+      {{- if .Values.reloader.deployment.image.digest }}
+      - image: "{{ .Values.reloader.deployment.image.name }}@{{ .Values.reloader.deployment.image.digest }}"
+      {{- else }}
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
       {{- end }}
+      {{- end }}
       imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
       name: {{ template "reloader-fullname" . }}
       env:
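The new digest branch lets the image be pinned by digest instead of tag when no global registry override is in play; illustrative values (the digest is a placeholder taken from the commented example in values.yaml):

```yaml
reloader:
  deployment:
    image:
      name: ghcr.io/stakater/reloader
      tag: v1.2.1
      # when set, digest takes precedence over tag in the template above
      digest: sha256:1234567  # placeholder
```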
Chart template (PodDisruptionBudget):

@@ -5,7 +5,12 @@ metadata:
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
+  {{- if .Values.reloader.podDisruptionBudget.maxUnavailable }}
+  maxUnavailable: {{ .Values.reloader.podDisruptionBudget.maxUnavailable }}
+  {{- end }}
+  {{- if and .Values.reloader.podDisruptionBudget.minAvailable (not .Values.reloader.podDisruptionBudget.maxUnavailable)}}
   minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
+  {{- end }}
   selector:
     matchLabels:
       app: {{ template "reloader-fullname" . }}
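With the guard added above, `maxUnavailable` wins whenever both budget values are set; an illustrative values snippet:

```yaml
reloader:
  podDisruptionBudget:
    enabled: true
    maxUnavailable: 1  # if minAvailable were also set, the template would ignore it
```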
Chart template (RBAC rules):

@@ -67,16 +67,6 @@ rules:
   - get
   - update
   - patch
-- apiGroups:
-    - "extensions"
-  resources:
-    - deployments
-    - daemonsets
-  verbs:
-    - list
-    - get
-    - update
-    - patch
 - apiGroups:
     - "batch"
   resources:
@@ -90,6 +80,9 @@ rules:
     - jobs
   verbs:
+    - create
+    - delete
     - list
     - get
 {{- if .Values.reloader.enableHA }}
 - apiGroups:
     - "coordination.k8s.io"
deployments/kubernetes/chart/reloader/values.yaml

@@ -5,6 +5,8 @@ global:
   ##
   imageRegistry: ""
+  imagePullSecrets: []
+  #imagePullSecrets:
+  #  - name: my-pull-secret
 
 kubernetes:
   host: https://kubernetes.default
@@ -94,11 +96,12 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v1.1.0
+      version: v1.2.1
     image:
       name: ghcr.io/stakater/reloader
-      tag: v1.1.0
+      base: stakater/reloader
+      tag: v1.2.1
+      # digest: sha256:1234567
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -286,6 +289,9 @@ reloader:
     enabled: false
     # Set the minimum available replicas
     # minAvailable: 1
+    # OR Set the maximum unavailable replicas
+    # maxUnavailable: 1
+    # If both defined only maxUnavailable will be used
 
   netpol:
     enabled: false
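The new `global.imagePullSecrets` value feeds the `imagePullSecrets` block in the deployment template shown earlier; an illustrative override, using the secret name from the commented example:

```yaml
global:
  imagePullSecrets:
    - name: my-pull-secret
```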
Static manifests (RBAC rules, two files):

@@ -48,6 +48,9 @@ rules:
     - jobs
   verbs:
+    - create
+    - delete
     - list
     - get
 - apiGroups:
     - ""
   resources:
@@ -52,6 +52,9 @@ rules:
     - jobs
   verbs:
+    - create
+    - delete
     - list
     - get
 - apiGroups:
     - ""
   resources:
docs: How Does Reloader Work?

@@ -1,6 +1,17 @@
 # How Does Reloader Work?
 
-Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in the data of these objects. After change detection Reloader performs a rolling upgrade on relevant Pods via the associated `Deployment`, `Daemonset` and `Statefulset`.
+Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in the data of these objects. After change detection Reloader performs a rolling upgrade on relevant Pods via the associated `Deployment`, `Daemonset` and `Statefulset`:
+
+```mermaid
+flowchart LR
+    subgraph Reloader
+        controller("Controller watches in a loop") -- "Detects a change" --> upgrade_handler("Upgrade handler checks if the change is a valid data change by comparing the change hash")
+        upgrade_handler -- "Update resource" --> update_resource("Updates the resource with computed hash of change")
+    end
+    Reloader -- "Watches" --> secret_configmaps("Secrets/ConfigMaps")
+    Reloader -- "Updates resources with Reloader environment variable" --> resources("Deployments/DaemonSets/StatefulSets resources with Reloader annotation")
+    resources -- "Restart pods based on StrategyType" --> Pods
+```
 
 ## How Does Change Detection Work?
 
@@ -43,7 +54,7 @@ When Reloader detects changes in configmap. It gets two objects of configmap. Fi
 
 After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulset` and looks for the above-mentioned annotation for the configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.
 
-### Environment Variable for ConfigMap
+### Environment Variable for Configmap
 
 If configmap name is foo then
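The hash-carrying environment variable referenced above is named after the resource (the name format is assumed from Reloader's docs, which the hunk truncates); a sketch for a configmap named `foo`, with a placeholder value:

```yaml
containers:
  - name: app
    env:
      - name: STAKATER_FOO_CONFIGMAP  # assumed name format for configmap "foo"
        value: ea19bf...              # placeholder; Reloader stores the data-change hash here
```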
docs: Reloader vs ConfigmapController

@@ -1,7 +1,6 @@
-
 # Reloader vs ConfigmapController
 
-Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from configmapController. Below is the small comparison between these two controllers.
+Reloader is inspired by [`Configmapcontroller`](https://github.com/fabric8io/configmapcontroller) but there are many ways in which it differs from `configmapController`. Below is a small comparison between these two controllers.
 
 | Reloader | Configmap |
 |----------|-----------|
docs/Reloader-with-Sealed-Secrets.md

@@ -3,12 +3,12 @@
 Below are the steps to use Reloader with Sealed Secrets:
 
 1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets)
-1. Install the controller for sealed secrets
+1. Install the controller for Sealed Secrets
 1. Fetch the encryption certificate
 1. Encrypt the secret
 1. Apply the secret
-1. Install the tool which uses that sealed secret
+1. Install the tool which uses that Sealed Secret
 1. Install Reloader
 1. Once everything is set up, update the original secret at the client and encrypt it with kubeseal to see Reloader working
-1. Apply the updated sealed secret
+1. Apply the updated Sealed Secret
 1. Reloader will restart the pod to use that updated secret
go.mod (34 changes)

@@ -10,24 +10,25 @@ require (
 	github.com/prometheus/client_golang v1.20.5
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.1
-	k8s.io/api v0.29.3
-	k8s.io/apimachinery v0.29.3
-	k8s.io/client-go v0.29.3
-	k8s.io/kubectl v0.29.3
-	k8s.io/utils v0.0.0-20240423183400-0849a56e8f22
+	github.com/stretchr/testify v1.9.0
+	k8s.io/api v0.31.1
+	k8s.io/apimachinery v0.31.1
+	k8s.io/client-go v0.31.1
+	k8s.io/kubectl v0.31.1
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
@@ -47,23 +48,26 @@ require (
 	github.com/moul/http2curl v1.0.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/smartystreets/goconvey v1.7.2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/net v0.26.0 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/oauth2 v0.21.0 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/klog/v2 v2.110.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
go.sum (81 changes)

@@ -21,8 +21,9 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr
 github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
 github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg0Kys0ZbaNmDDzZ2R/C7DTi+bbsJ0=
@@ -34,17 +35,17 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -55,10 +56,11 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv
 github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -94,8 +96,8 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -157,13 +159,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
 github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f h1:v/UGegormU7y/1hMpt52McJtlBrsLgXpySOesXWFQVg=
 github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw=
 github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
@@ -173,10 +175,9 @@ github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1k
 github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
-github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -186,8 +187,8 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
 github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -214,10 +215,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -248,6 +253,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
 golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -279,10 +286,14 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -290,6 +301,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -340,6 +353,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
@@ -361,30 +376,30 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
|
||||
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
|
||||
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
|
||||
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
|
||||
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
|
||||
k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
|
||||
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
|
||||
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
|
||||
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
|
||||
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
|
||||
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
|
||||
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
|
||||
k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
|
||||
k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us=
|
||||
k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24=
|
||||
k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 h1:ao5hUqGhsqdm+bYbjH/pRkCs0unBGe9UyDahzs9zQzQ=
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
|
||||
@@ -2,10 +2,11 @@ package callbacks

import (
    "context"
    "time"
    "fmt"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/pkg/kube"
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
@@ -91,6 +92,26 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
    return items
}

// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
    jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list jobs %v", err)
    }

    items := make([]runtime.Object, len(jobs.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range jobs.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            annotations := make(map[string]string)
            jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
        }
        items[i] = &jobs.Items[i]
    }

    return items
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
    daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -177,6 +198,11 @@ func GetCronJobAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.CronJob).ObjectMeta.Annotations
}

// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.Job).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
@@ -207,6 +233,11 @@ func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}

// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
@@ -237,6 +268,11 @@ func GetCronJobContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers
}

// GetJobContainers returns the containers of given job
func GetJobContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.Job).Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
@@ -267,6 +303,11 @@ func GetCronJobInitContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers
}

// GetJobInitContainers returns the init containers of given job
func GetJobInitContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.Job).Spec.Template.Spec.InitContainers
}

// GetDaemonSetInitContainers returns the init containers of given daemonSet
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
@@ -306,6 +347,38 @@ func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runti
    return err
}

// ReCreateJobFromjob re-creates the given job to trigger a reload
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
    oldJob := resource.(*batchv1.Job)
    job := oldJob.DeepCopy()

    // Delete the old job
    policy := meta_v1.DeletePropagationBackground
    err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy})
    if err != nil {
        return err
    }

    // Remove fields that should not be specified when creating a new Job
    job.ObjectMeta.ResourceVersion = ""
    job.ObjectMeta.UID = ""
    job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
    job.Status = batchv1.JobStatus{}

    // Remove problematic labels
    delete(job.Spec.Template.Labels, "controller-uid")
    delete(job.Spec.Template.Labels, batchv1.ControllerUidLabel)
    delete(job.Spec.Template.Labels, batchv1.JobNameLabel)
    delete(job.Spec.Template.Labels, "job-name")

    // Remove the selector to allow it to be auto-generated
    job.Spec.Selector = nil

    // Create the new job with same spec
    _, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
    return err
}
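For context on the function above: a Job's pod template is immutable after creation, so unlike Deployments a Job cannot pick up changed ConfigMap or Secret data through a template patch; delete-and-recreate is the only route. The following is a minimal standalone sketch of the same pattern under that assumption — recreateJob and clientset are illustrative names, not Reloader API:

package example

import (
    "context"

    batchv1 "k8s.io/api/batch/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// recreateJob is an illustrative helper, not Reloader code.
func recreateJob(clientset kubernetes.Interface, old *batchv1.Job) error {
    job := old.DeepCopy()

    // Delete the old Job; background propagation cleans up its pods asynchronously.
    policy := metav1.DeletePropagationBackground
    if err := clientset.BatchV1().Jobs(old.Namespace).Delete(context.TODO(), old.Name,
        metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
        return err
    }

    // Server-populated fields must be cleared before the object can be created again.
    job.ResourceVersion = ""
    job.UID = ""
    job.CreationTimestamp = metav1.Time{}
    job.Status = batchv1.JobStatus{}

    // Drop controller-managed labels and the selector so the API server
    // generates fresh ones for the new Job.
    for _, l := range []string{"controller-uid", "job-name",
        batchv1.ControllerUidLabel, batchv1.JobNameLabel} {
        delete(job.Spec.Template.Labels, l)
    }
    job.Spec.Selector = nil

    _, err := clientset.BatchV1().Jobs(old.Namespace).Create(context.TODO(), job, metav1.CreateOptions{})
    return err
}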

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
    daemonSet := resource.(*appsv1.DaemonSet)
@@ -329,11 +402,15 @@ func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource run

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
    var err error
    rollout := resource.(*argorolloutv1alpha1.Rollout)
    rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
    logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
    logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
    _, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
    strategy := rollout.GetAnnotations()[options.RolloutStrategyAnnotation]
    switch options.ToArgoRolloutStrategy(strategy) {
    case options.RestartStrategy:
        _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
    case options.RolloutStrategy:
        _, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    }
    return err
}
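In the hunk above, the Get/Warnf debug lines and the unconditional Patch are the removed side of the diff; the var err declaration and the strategy switch are the added side. The two strategies behave differently in Argo Rollouts: restart merge-patches spec.restartAt, which restarts the Rollout's pods in place without producing a new revision, while rollout pushes the mutated pod template as a regular update, so the reload flows through any configured canary or blue-green steps. A sketch of the restart patch in isolation, assuming a configured Argo Rollouts clientset (restartRollout and client are illustrative names):

package example

import (
    "context"
    "fmt"
    "time"

    versioned "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func restartRollout(client versioned.Interface, namespace, name string) error {
    // The RFC3339 timestamp records when the restart was requested; the
    // Argo Rollouts controller then recreates pods without a new revision.
    patch := []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`,
        time.Now().Format(time.RFC3339)))
    _, err := client.ArgoprojV1alpha1().Rollouts(namespace).Patch(
        context.TODO(), name, types.MergePatchType, patch, metav1.PatchOptions{})
    return err
}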

@@ -347,6 +424,11 @@ func GetCronJobVolumes(item runtime.Object) []v1.Volume {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes
}

// GetJobVolumes returns the Volumes of given job
func GetJobVolumes(item runtime.Object) []v1.Volume {
    return item.(*batchv1.Job).Spec.Template.Spec.Volumes
}

// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
524 internal/pkg/callbacks/rolling_upgrade_test.go Normal file
@@ -0,0 +1,524 @@
package callbacks_test

import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    watch "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes/fake"

    argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
    fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"

    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/testutil"
    "github.com/stakater/Reloader/pkg/kube"
)

var (
    clients = setupTestClients()
)

type testFixtures struct {
    defaultContainers []v1.Container
    defaultInitContainers []v1.Container
    defaultVolumes []v1.Volume
    namespace string
}

func newTestFixtures() testFixtures {
    return testFixtures{
        defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
        defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}},
        defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}},
        namespace: "default",
    }
}

func setupTestClients() kube.Clients {
    return kube.Clients{
        KubernetesClient: fake.NewSimpleClientset(),
        ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
    }
}

// TestUpdateRollout tests the rollout update strategy annotation
func TestUpdateRollout(t *testing.T) {
    namespace := "test-ns"

    cases := map[string]struct {
        name string
        strategy string
        isRestart bool
    }{
        "test-without-strategy": {
            name: "defaults to rollout strategy",
            strategy: "",
            isRestart: false,
        },
        "test-with-restart-strategy": {
            name: "triggers a restart strategy",
            strategy: "restart",
            isRestart: true,
        },
        "test-with-rollout-strategy": {
            name: "triggers a rollout strategy",
            strategy: "rollout",
            isRestart: false,
        },
    }
    for name, tc := range cases {
        t.Run(name, func(t *testing.T) {
            rollout, err := testutil.CreateRollout(
                clients.ArgoRolloutClient, name, namespace,
                map[string]string{options.RolloutStrategyAnnotation: tc.strategy},
            )
            if err != nil {
                t.Errorf("creating rollout: %v", err)
            }
            modifiedChan := watchRollout(rollout.Name, namespace)

            err = callbacks.UpdateRollout(clients, namespace, rollout)
            if err != nil {
                t.Errorf("updating rollout: %v", err)
            }
            rollout, err = clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(
                namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})

            if err != nil {
                t.Errorf("getting rollout: %v", err)
            }
            if isRestartStrategy(rollout) != tc.isRestart {
                t.Errorf("restart strategy mismatch: expected %v", tc.isRestart)
            }
            select {
            case <-modifiedChan:
                // object has been modified
            case <-time.After(1 * time.Second):
                t.Errorf("Rollout has not been updated")
            }
        })
    }
}

func TestResourceItems(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name string
        createFunc func(kube.Clients, string) error
        getItemsFunc func(kube.Clients, string) []runtime.Object
        expectedCount int
    }{
        {
            name: "Deployments",
            createFunc: createTestDeployments,
            getItemsFunc: callbacks.GetDeploymentItems,
            expectedCount: 2,
        },
        {
            name: "CronJobs",
            createFunc: createTestCronJobs,
            getItemsFunc: callbacks.GetCronJobItems,
            expectedCount: 2,
        },
        {
            name: "Jobs",
            createFunc: createTestJobs,
            getItemsFunc: callbacks.GetJobItems,
            expectedCount: 2,
        },
        {
            name: "DaemonSets",
            createFunc: createTestDaemonSets,
            getItemsFunc: callbacks.GetDaemonSetItems,
            expectedCount: 2,
        },
        {
            name: "StatefulSets",
            createFunc: createTestStatefulSets,
            getItemsFunc: callbacks.GetStatefulSetItems,
            expectedCount: 2,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := tt.createFunc(clients, fixtures.namespace)
            assert.NoError(t, err)

            items := tt.getItemsFunc(clients, fixtures.namespace)
            assert.Equal(t, tt.expectedCount, len(items))
        })
    }
}

func TestGetAnnotations(t *testing.T) {
    testAnnotations := map[string]string{"version": "1"}

    tests := []struct {
        name string
        resource runtime.Object
        getFunc func(runtime.Object) map[string]string
    }{
        {"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations},
        {"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations},
        {"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations},
        {"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations},
        {"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations},
        {"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetRolloutAnnotations},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
        })
    }
}

func TestGetPodAnnotations(t *testing.T) {
    testAnnotations := map[string]string{"version": "1"}

    tests := []struct {
        name string
        resource runtime.Object
        getFunc func(runtime.Object) map[string]string
    }{
        {"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations},
        {"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations},
        {"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations},
        {"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations},
        {"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations},
        {"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
        })
    }
}

func TestGetContainers(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name string
        resource runtime.Object
        getFunc func(runtime.Object) []v1.Container
    }{
        {"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers},
        {"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers},
        {"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers},
        {"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), callbacks.GetCronJobContainers},
        {"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers},
        {"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource))
        })
    }
}

func TestGetInitContainers(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name string
        resource runtime.Object
        getFunc func(runtime.Object) []v1.Container
    }{
        {"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers},
        {"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers},
        {"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers},
        {"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers},
        {"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers},
        {"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource))
        })
    }
}

func TestUpdateResources(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name string
        createFunc func(kube.Clients, string, string) (runtime.Object, error)
        updateFunc func(kube.Clients, string, runtime.Object) error
    }{
        {"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
        {"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
        {"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            resource, err := tt.createFunc(clients, fixtures.namespace, "1")
            assert.NoError(t, err)

            err = tt.updateFunc(clients, fixtures.namespace, resource)
            assert.NoError(t, err)
        })
    }
}

func TestCreateJobFromCronjob(t *testing.T) {
    fixtures := newTestFixtures()

    cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
    assert.NoError(t, err)

    err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
    assert.NoError(t, err)
}

func TestReCreateJobFromJob(t *testing.T) {
    fixtures := newTestFixtures()

    job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1")
    assert.NoError(t, err)

    err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
    assert.NoError(t, err)
}

func TestGetVolumes(t *testing.T) {
    fixtures := newTestFixtures()

    tests := []struct {
        name string
        resource runtime.Object
        getFunc func(runtime.Object) []v1.Volume
    }{
        {"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes},
        {"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes},
        {"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes},
        {"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes},
        {"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource))
        })
    }
}

// Helper functions

// isRestartStrategy reports whether a restart was requested; flipped from the
// inverted RestartAt == nil form so the name matches what it returns, with the
// call site in TestUpdateRollout adjusted to match.
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
    return rollout.Spec.RestartAt != nil
}

func watchRollout(name, namespace string) chan interface{} {
    timeOut := int64(1)
    modifiedChan := make(chan interface{})
    watcher, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Watch(context.Background(), meta_v1.ListOptions{TimeoutSeconds: &timeOut})
    go watchModified(watcher, name, modifiedChan)
    return modifiedChan
}

func watchModified(watcher watch.Interface, name string, modifiedChan chan interface{}) {
    for event := range watcher.ResultChan() {
        item := event.Object.(*argorolloutv1alpha1.Rollout)
        if item.Name == name {
            switch event.Type {
            case watch.Modified:
                modifiedChan <- nil
            }
            return
        }
    }
}

func createTestDeployments(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false)
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestCronJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestJobs(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestDaemonSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
        if err != nil {
            return err
        }
    }
    return nil
}

func createTestStatefulSets(clients kube.Clients, namespace string) error {
    for i := 1; i <= 2; i++ {
        _, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
        if err != nil {
            return err
        }
    }
    return nil
}

func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
    switch v := obj.(type) {
    case *appsv1.Deployment:
        v.Spec.Template.ObjectMeta.Annotations = annotations
    case *appsv1.DaemonSet:
        v.Spec.Template.ObjectMeta.Annotations = annotations
    case *appsv1.StatefulSet:
        v.Spec.Template.ObjectMeta.Annotations = annotations
    case *batchv1.CronJob:
        v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
    case *batchv1.Job:
        v.Spec.Template.ObjectMeta.Annotations = annotations
    case *argorolloutv1alpha1.Rollout:
        v.Spec.Template.ObjectMeta.Annotations = annotations
    }
    return obj
}

func createResourceWithContainers(obj runtime.Object, containers []v1.Container) runtime.Object {
    switch v := obj.(type) {
    case *appsv1.Deployment:
        v.Spec.Template.Spec.Containers = containers
    case *appsv1.DaemonSet:
        v.Spec.Template.Spec.Containers = containers
    case *appsv1.StatefulSet:
        v.Spec.Template.Spec.Containers = containers
    case *batchv1.CronJob:
        v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers
    case *batchv1.Job:
        v.Spec.Template.Spec.Containers = containers
    case *argorolloutv1alpha1.Rollout:
        v.Spec.Template.Spec.Containers = containers
    }
    return obj
}

func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object {
    switch v := obj.(type) {
    case *appsv1.Deployment:
        v.Spec.Template.Spec.InitContainers = initContainers
    case *appsv1.DaemonSet:
        v.Spec.Template.Spec.InitContainers = initContainers
    case *appsv1.StatefulSet:
        v.Spec.Template.Spec.InitContainers = initContainers
    case *batchv1.CronJob:
        v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers
    case *batchv1.Job:
        v.Spec.Template.Spec.InitContainers = initContainers
    case *argorolloutv1alpha1.Rollout:
        v.Spec.Template.Spec.InitContainers = initContainers
    }
    return obj
}

func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object {
    switch v := obj.(type) {
    case *appsv1.Deployment:
        v.Spec.Template.Spec.Volumes = volumes
    case *batchv1.CronJob:
        v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes
    case *batchv1.Job:
        v.Spec.Template.Spec.Volumes = volumes
    case *appsv1.DaemonSet:
        v.Spec.Template.Spec.Volumes = volumes
    case *appsv1.StatefulSet:
        v.Spec.Template.Spec.Volumes = volumes
    }
    return obj
}

func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    deployment := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-deployment",
            Namespace: namespace,
            Annotations: map[string]string{"version": version},
        },
    }
    return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}

func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    daemonSet := &appsv1.DaemonSet{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-daemonset",
            Namespace: namespace,
            Annotations: map[string]string{"version": version},
        },
    }
    return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}

func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    statefulSet := &appsv1.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-statefulset",
            Namespace: namespace,
            Annotations: map[string]string{"version": version},
        },
    }
    return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}

func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    cronJob := &batchv1.CronJob{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-cronjob",
            Namespace: namespace,
            Annotations: map[string]string{"version": version},
        },
    }
    return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}

func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
    job := &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{
            Name: "test-job",
            Namespace: namespace,
            Annotations: map[string]string{"version": version},
        },
    }
    return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}
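A note on the scaffolding above: fake.NewSimpleClientset and the Argo Rollouts fake clientset back every verb with an in-memory object tracker and can serve watches, which is why watchRollout observes the Modified event without any real API server. A minimal illustration of the pattern (names are illustrative):

package example

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func fakeClientDemo() {
    client := fake.NewSimpleClientset()
    // Create and read back a ConfigMap; both calls hit the in-memory tracker.
    _, _ = client.CoreV1().ConfigMaps("default").Create(context.TODO(),
        &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}, metav1.CreateOptions{})
    cm, _ := client.CoreV1().ConfigMaps("default").Get(context.TODO(), "demo", metav1.GetOptions{})
    fmt.Println(cm.Name) // "demo", served entirely from memory
}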
@@ -28,7 +28,7 @@ import (
type Controller struct {
    client kubernetes.Interface
    indexer cache.Indexer
    queue workqueue.RateLimitingInterface
    queue workqueue.TypedRateLimitingInterface[any]
    informer cache.Controller
    namespace string
    resource string
@@ -67,7 +67,7 @@ func NewController(
    })
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})

    queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
    queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]())

    optionsModifier := func(options *metav1.ListOptions) {
        if resource == "namespaces" {
@@ -81,12 +81,17 @@ func NewController(

    listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier)

    indexer, informer := cache.NewIndexerInformer(listWatcher, kube.ResourceMap[resource], 0, cache.ResourceEventHandlerFuncs{
        AddFunc: c.Add,
        UpdateFunc: c.Update,
        DeleteFunc: c.Delete,
    }, cache.Indexers{})
    c.indexer = indexer
    _, informer := cache.NewInformerWithOptions(cache.InformerOptions{
        ListerWatcher: listWatcher,
        ObjectType: kube.ResourceMap[resource],
        ResyncPeriod: 0,
        Handler: cache.ResourceEventHandlerFuncs{
            AddFunc: c.Add,
            UpdateFunc: c.Update,
            DeleteFunc: c.Delete,
        },
        Indexers: cache.Indexers{},
    })
    c.informer = informer
    c.queue = queue
    c.collectors = collectors

@@ -2208,7 +2208,7 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
    type fields struct {
        client kubernetes.Interface
        indexer cache.Indexer
        queue workqueue.RateLimitingInterface
        queue workqueue.TypedRateLimitingInterface[any]
        informer cache.Controller
        namespace string
        ignoredNamespaces util.List
@@ -2291,7 +2291,7 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
func TestController_resourceInNamespaceSelector(t *testing.T) {
    type fields struct {
        indexer cache.Indexer
        queue workqueue.RateLimitingInterface
        queue workqueue.TypedRateLimitingInterface[any]
        informer cache.Controller
        namespace v1.Namespace
        namespaceSelector string

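For context on the queue change in this file: recent client-go releases introduce generic, typed workqueues and deprecate the untyped RateLimitingInterface; the migration above keeps any as the item type, so behavior is unchanged while satisfying the new API. A small sketch of the typed API with a concrete key type (illustrative, not Reloader code):

package example

import "k8s.io/client-go/util/workqueue"

func typedQueueDemo() {
    // Items are statically typed, so Get/Done need no type assertions.
    queue := workqueue.NewTypedRateLimitingQueue(
        workqueue.DefaultTypedControllerRateLimiter[string]())
    defer queue.ShutDown()

    queue.Add("default/my-configmap")
    if key, shutdown := queue.Get(); !shutdown {
        defer queue.Done(key)
        _ = key // process the namespaced key here
    }
}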
@@ -55,6 +55,20 @@ func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
    }
}

// GetJobCreateJobFuncs returns all callback funcs for a job
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
    return callbacks.RollingUpgradeFuncs{
        ItemsFunc: callbacks.GetJobItems,
        AnnotationsFunc: callbacks.GetJobAnnotations,
        PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
        ContainersFunc: callbacks.GetJobContainers,
        InitContainersFunc: callbacks.GetJobInitContainers,
        UpdateFunc: callbacks.ReCreateJobFromjob,
        VolumesFunc: callbacks.GetJobVolumes,
        ResourceType: "Job",
    }
}

// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
    return callbacks.RollingUpgradeFuncs{
@@ -153,6 +167,10 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
    if err != nil {
        return err
    }
    err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err
    }
    err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err

@@ -2,6 +2,15 @@ package options

import "github.com/stakater/Reloader/internal/pkg/constants"

type ArgoRolloutStrategy int

const (
    // RestartStrategy is the annotation value for restart strategy for rollouts
    RestartStrategy ArgoRolloutStrategy = iota
    // RolloutStrategy is the annotation value for rollout strategy for rollouts
    RolloutStrategy
)

var (
    // Auto reload all resources when their corresponding configmaps/secrets are updated
    AutoReloadAll = false
@@ -27,6 +36,8 @@ var (
    // SearchMatchAnnotation is an annotation to tag secrets to be found with
    // AutoSearchAnnotation
    SearchMatchAnnotation = "reloader.stakater.com/match"
    // RolloutStrategyAnnotation is an annotation to define rollout update strategy
    RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
    // LogFormat is the log format to use (json, or empty string for default)
    LogFormat = ""
    // LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -45,3 +56,14 @@ var (
    // Url to send a request to instead of triggering a reload
    WebhookUrl = ""
)

func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {
    switch s {
    case "restart":
        return RestartStrategy
    case "rollout":
        fallthrough
    default:
        return RolloutStrategy
    }
}

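To make ToArgoRolloutStrategy concrete: the strategy is selected per Rollout through the reloader.stakater.com/rollout-strategy annotation, where "restart" maps to RestartStrategy while "rollout", an empty value, or anything unrecognized falls through to RolloutStrategy. A hypothetical manifest snippet:

apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: my-rollout            # illustrative name
  annotations:
    reloader.stakater.com/rollout-strategy: "restart"   # or "rollout" (the default)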
@@ -10,6 +2,8 @@ import (
    "strings"
    "time"

    argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
    argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
    openshiftv1 "github.com/openshift/api/apps/v1"
    appsclient "github.com/openshift/client-go/apps/clientset/versioned"
    "github.com/sirupsen/logrus"
@@ -21,6 +23,7 @@ import (
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/kube"
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,16 +72,16 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) {
    }
}

func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) metav1.ObjectMeta {
func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta {
    return metav1.ObjectMeta{
        Name: name,
        Namespace: namespace,
        Labels: map[string]string{"firstLabel": "temp"},
        Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload),
        Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations),
    }
}

func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) map[string]string {
func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string {
    annotations := make(map[string]string)
    if autoReload {
        annotations[options.ReloaderAutoAnnotation] = "true"
@@ -90,13 +93,15 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
        annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
    }

    if len(annotations) > 0 {
        return annotations
    } else {
        return map[string]string{
    if !(len(annotations) > 0) {
        annotations = map[string]string{
            options.ConfigmapUpdateOnChangeAnnotation: name,
            options.SecretUpdateOnChangeAnnotation: name}
    }
    for k, v := range extraAnnotations {
        annotations[k] = v
    }
    return annotations
}
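One consequence of the getAnnotations refactor above: extraAnnotations is merged last, so it can override any default the helper would otherwise set. An illustrative in-package call (values hypothetical):

// The extra entry is applied last and wins on conflict:
anns := getAnnotations("my-app", true, false, false,
    map[string]string{options.ReloaderAutoAnnotation: "false"})
// anns[options.ReloaderAutoAnnotation] == "false"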
|
||||
func getEnvVarSources(name string) []v1.EnvFromSource {
|
||||
@@ -173,7 +178,7 @@ func getVolumes(name string) []v1.Volume {
|
||||
}
|
||||
}
|
||||
|
||||
func getVolumeMounts(name string) []v1.VolumeMount {
|
||||
func getVolumeMounts() []v1.VolumeMount {
|
||||
return []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "etc/config",
|
||||
@@ -271,7 +276,7 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
|
||||
Value: "test",
|
||||
},
|
||||
},
|
||||
VolumeMounts: getVolumeMounts(name),
|
||||
VolumeMounts: getVolumeMounts(),
|
||||
},
|
||||
},
|
||||
Volumes: getVolumes(name),
|
||||
@@ -289,7 +294,7 @@ func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
|
||||
{
|
||||
Image: "busybox",
|
||||
Name: "busyBox",
|
||||
VolumeMounts: getVolumeMounts(name),
|
||||
VolumeMounts: getVolumeMounts(),
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
@@ -342,7 +347,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
|
||||
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -361,7 +366,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
|
||||
replicaset := int32(1)
|
||||
podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
|
||||
return &openshiftv1.DeploymentConfig{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
|
||||
Spec: openshiftv1.DeploymentConfigSpec{
|
||||
Replicas: replicaset,
|
||||
Strategy: openshiftv1.DeploymentStrategy{
|
||||
@@ -376,7 +381,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
|
||||
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -394,7 +399,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *ap
|
||||
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -411,7 +416,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName strin
|
||||
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -429,7 +434,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
|
||||
replicaset := int32(1)
|
||||
podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
|
||||
return &openshiftv1.DeploymentConfig{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
|
||||
Spec: openshiftv1.DeploymentConfigSpec{
|
||||
Replicas: replicaset,
|
||||
Strategy: openshiftv1.DeploymentStrategy{
|
||||
@@ -443,7 +448,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
|
||||
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -460,7 +465,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
|
||||
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
|
||||
replicaset := int32(1)
|
||||
deployment := &appsv1.Deployment{
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -475,7 +480,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
|
||||
if !both {
|
||||
deployment.ObjectMeta.Annotations = nil
|
||||
}
|
||||
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false)
|
||||
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
|
||||
return deployment
|
||||
}
|
||||
|
||||
@@ -483,9 +488,9 @@ func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName strin
|
||||
replicaset := int32(1)
|
||||
var objectMeta metav1.ObjectMeta
|
||||
if resourceType == SecretResourceType {
|
||||
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false)
|
||||
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
|
||||
} else if resourceType == ConfigmapResourceType {
|
||||
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true)
|
||||
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
|
||||
}
|
||||
|
||||
return &appsv1.Deployment{
|
||||
@@ -537,7 +542,7 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string,
|
||||
// GetDaemonSet provides daemonset for testing
|
||||
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
|
||||
return &appsv1.DaemonSet{
|
||||
ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}),
|
||||
Spec: appsv1.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -552,7 +557,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
|
||||
|
||||
func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
|
||||
return &appsv1.DaemonSet{
|
||||
ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}),
|
||||
Spec: appsv1.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -568,7 +573,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.Dae
|
||||
// GetStatefulSet provides statefulset for testing
|
||||
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
|
||||
return &appsv1.StatefulSet{
|
||||
ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}),
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -584,7 +589,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSe
|
||||
// GetStatefulSet provides statefulset for testing
|
||||
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
|
||||
return &appsv1.StatefulSet{
|
||||
ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false),
|
||||
ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}),
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"secondLabel": "temp"},
|
||||
@@ -633,6 +638,64 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret {
 	}
 }
 
+func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
+	return &batchv1.CronJob{
+		ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
+		Spec: batchv1.CronJobSpec{
+			Schedule: "*/5 * * * *", // Run every 5 minutes
+			JobTemplate: batchv1.JobTemplateSpec{
+				Spec: batchv1.JobSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"secondLabel": "temp"},
+					},
+					Template: getPodTemplateSpecWithVolumes(cronJobName),
+				},
+			},
+		},
+	}
+}
+
+func GetJob(namespace string, jobName string) *batchv1.Job {
+	return &batchv1.Job{
+		ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}),
+		Spec: batchv1.JobSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Template: getPodTemplateSpecWithVolumes(jobName),
+		},
+	}
+}
+
+func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {
+	return &batchv1.CronJob{
+		ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}),
+		Spec: batchv1.CronJobSpec{
+			Schedule: "*/5 * * * *", // Run every 5 minutes
+			JobTemplate: batchv1.JobTemplateSpec{
+				Spec: batchv1.JobSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"secondLabel": "temp"},
+					},
+					Template: getPodTemplateSpecWithEnvVars(cronJobName),
+				},
+			},
+		},
+	}
+}
+
+func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
+	return &batchv1.Job{
+		ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}),
+		Spec: batchv1.JobSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Template: getPodTemplateSpecWithEnvVars(jobName),
+		},
+	}
+}
+
 // GetSecretWithUpdatedLabel provides secret for testing
 func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
 	return &v1.Secret{
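The four new helpers mirror the file's existing getter pattern: the plain variants build pod templates via getPodTemplateSpecWithVolumes, while the WithEnvVar variants use getPodTemplateSpecWithEnvVars. As a minimal, illustrative sketch (not part of this change), a test could exercise GetCronJob against client-go's fake clientset like this:

// Illustrative sketch only: the test name and assertions are assumptions,
// and GetCronJob is the helper added in the hunk above.
package testutil_test

import (
    "context"
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestGetCronJobDefaults(t *testing.T) {
    client := fake.NewSimpleClientset()
    cronJob := GetCronJob("test-ns", "test-cronjob")
    created, err := client.BatchV1().CronJobs("test-ns").Create(context.TODO(), cronJob, metav1.CreateOptions{})
    if err != nil {
        t.Fatalf("creating cronjob: %v", err)
    }
    if created.Spec.Schedule != "*/5 * * * *" {
        t.Errorf("unexpected schedule: %q", created.Spec.Schedule)
    }
}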
@@ -843,6 +906,36 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
 	return statefulset, err
 }
 
+// CreateCronJob creates a cronjob in given namespace and returns the CronJob
+func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) {
+	logrus.Infof("Creating CronJob")
+	cronJobClient := client.BatchV1().CronJobs(namespace)
+	var cronJobObj *batchv1.CronJob
+	if volumeMount {
+		cronJobObj = GetCronJob(namespace, cronJobName)
+	} else {
+		cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName)
+	}
+	cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return cronJob, err
+}
+
+// CreateJob creates a job in given namespace and returns the Job
+func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) {
+	logrus.Infof("Creating Job")
+	jobClient := client.BatchV1().Jobs(namespace)
+	var jobObj *batchv1.Job
+	if volumeMount {
+		jobObj = GetJob(namespace, jobName)
+	} else {
+		jobObj = GetJobWithEnvVar(namespace, jobName)
+	}
+	job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return job, err
+}
+
 // DeleteDeployment deletes a deployment in given namespace and returns the error if any
 func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
 	logrus.Infof("Deleting Deployment")
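Both creators follow the file's established Create* convention: the volumeMount flag selects a variant (true picks the volume-mounting getter, false the env-var one), the object is created through the typed client, and a three-second sleep gives watch-based assertions time to observe the new resource. An illustrative call, assuming it runs inside a test with a fake clientset in scope:

// Sketch only: "client" and "t" come from the surrounding test, as in the
// TestGetCronJobDefaults sketch above.
client := fake.NewSimpleClientset()
job, err := CreateJob(client, "test-job", "test-ns", true) // true => volume-mount variant
if err != nil {
    t.Fatalf("CreateJob failed: %v", err)
}
logrus.Infof("created job %s/%s", job.Namespace, job.Name)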
@@ -1071,3 +1164,28 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
 func GetSHAfromEmptyData() string {
 	return crypto.GenerateSHA("")
 }
+
+// GetRollout provides rollout for testing
+func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout {
+	replicaset := int32(1)
+	return &argorolloutv1alpha1.Rollout{
+		ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations),
+		Spec: argorolloutv1alpha1.RolloutSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Replicas: &replicaset,
+			Template: getPodTemplateSpecWithVolumes(rolloutName),
+		},
+	}
+}
+
+// CreateRollout creates a rollout in given namespace and returns the Rollout
+func CreateRollout(client argorollout.Interface, rolloutName string, namespace string, annotations map[string]string) (*argorolloutv1alpha1.Rollout, error) {
+	logrus.Infof("Creating Rollout")
+	rolloutClient := client.ArgoprojV1alpha1().Rollouts(namespace)
+	rolloutObj := GetRollout(namespace, rolloutName, annotations)
+	rollout, err := rolloutClient.Create(context.TODO(), rolloutObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return rollout, err
+}
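Unlike the other creators, CreateRollout takes the Argo Rollouts typed client (argorollout.Interface) rather than kubernetes.Interface, and it threads the caller's annotations straight through to GetRollout instead of passing an empty map. A sketch of how a test might drive it, assuming the conventional code-generated fake clientset for Argo Rollouts; the reloader.stakater.com/auto annotation is Reloader's usual opt-in marker:

// Sketch only: argofake is assumed to be the generated fake package,
// "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake",
// which satisfies argorollout.Interface; "t" comes from the surrounding test.
client := argofake.NewSimpleClientset()
annotations := map[string]string{"reloader.stakater.com/auto": "true"}
rollout, err := CreateRollout(client, "test-rollout", "test-ns", annotations)
if err != nil {
    t.Fatalf("CreateRollout failed: %v", err)
}
logrus.Infof("created rollout %s with annotations %v", rollout.Name, rollout.Annotations)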
Submodule theme_common updated: 0eef29f4cb...11286e112e