Compare commits

...

10 Commits

Author SHA1 Message Date
Karl Johan Grahn
d18aabe160 update (#847) 2025-02-17 21:03:25 +01:00
Karl Johan Grahn
6a4bca0fce Update Helm chart for v1.3.0 (#846)
* update

* update
2025-02-17 18:35:59 +05:00
renovate[bot]
561f21a81d chore(deps): update stakater/.github action to v0.0.128 (#840)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-14 21:00:30 +01:00
renovate[bot]
0aa974f7e6 chore(deps): update dependency stakater/vale-package to v0.0.53 (#842)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-14 20:32:03 +01:00
Karl Johan Grahn
5185ff2c91 update (#841) 2025-02-14 10:01:55 +01:00
Karl Johan Grahn
d6a95a923a Docs: Update base python image and add params for the combine script (#839)
* update

* update

* update
2025-02-13 15:21:24 +01:00
renovate[bot]
3dc3b4726c chore(deps): update stakater/.github action to v0.0.122 (#836)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-12 23:58:01 +01:00
renovate[bot]
1b2780f712 chore(deps): update stakater/.github action to v0.0.120 (#833)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-02-12 10:10:33 +01:00
Mohamed Sekour
574129d637 support recreating k8s jobs when configmap/secret changed (#808)
* support recreating k8s jobs when configmap/secret changed

Co-authored-by: Mohamed Sekour <mohamed.sekour@exfo.com>

* add unit tests

* fix tests
2025-02-05 10:50:25 +01:00
renovate[bot]
919d0cc3ca chore(deps): update dependency stakater/vale-package to v0.0.52 (#829)
* chore(deps): update dependency stakater/vale-package to v0.0.52

* update

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Karl Johan Grahn <karl.johan@stakater.com>
2025-02-05 10:23:20 +01:00
22 changed files with 656 additions and 35 deletions

View File

@@ -25,7 +25,7 @@ env:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.117
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.128
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: README.md

View File

@@ -15,8 +15,17 @@ on:
jobs:
qa:
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.117
uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.128
with:
MD_CONFIG: .github/md_config.json
DOC_SRC: docs
MD_LINT_CONFIG: .markdownlint.yaml
build:
uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.122
with:
DOCKER_FILE_PATH: Dockerfile-docs
CONTAINER_REGISTRY_URL: ghcr.io/stakater
secrets:
CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}

View File

@@ -200,7 +200,6 @@ jobs:
push: true
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
cache-to: type=inline
platforms: linux/amd64,linux/arm,linux/arm64
tags: |
${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
labels: |

View File

@@ -14,4 +14,4 @@ jobs:
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{ secrets.STAKATER_AB_TOKEN_FOR_RLDR }}" \
https://api.github.com/repos/stakater-ab/reloader-enterprise/dispatches \
-d '{"event_type":"release-published"}'
-d '{"event_type":"release-published","client_payload":{"tag":"${{ github.event.release.tag_name }}"}}'

View File

@@ -1,7 +1,7 @@
StylesPath = styles
MinAlertLevel = warning
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.47/Stakater.zip
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.53/Stakater.zip
Vocab = Stakater
# Only check MarkDown files

View File

@@ -1,4 +1,4 @@
FROM python:3.12 as builder
FROM python:3.13-alpine as builder
# set workdir
RUN mkdir -p $HOME/application
@@ -10,7 +10,7 @@ COPY --chown=1001:root . .
RUN pip3 install -r theme_common/requirements.txt
# Combine Theme Resources
RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
RUN python theme_common/scripts/combine_theme_resources.py -s theme_common/resources -ov theme_override/resources -o dist/_theme
# Produce mkdocs file
RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml

View File

@@ -50,13 +50,13 @@ spec:
metadata:
```
This will discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts automatically where `foo-configmap` or `foo-secret` is being used, either via an environment variable or a volume mount, and it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
This will discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts/cronjobs/jobs automatically where `foo-configmap` or `foo-secret` is being used, either via an environment variable or a volume mount, and it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
You can filter by the type of monitored resource and use typed versions of the `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add the `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use the `configmap.reloader.stakater.com/auto` annotation to look only for changes in mounted `ConfigMap`s; changes in any mounted `Secret`s will not trigger a rolling upgrade on related pods.
You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deploymentconfigs/deployments/daemonsets/statefulsets/rollouts like this:
your deploymentconfigs/deployments/daemonsets/statefulsets/rollouts/cronjobs/jobs like this:
```yaml
kind: Deployment
@@ -91,7 +91,7 @@ not.
Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"` or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"` will restart upon a change in a secret it uses.
We can also specify a specific configmap or secret that triggers a rolling upgrade only upon a change in that particular configmap or secret. This way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset` or `rollout`.
We can also specify a specific configmap or secret that triggers a rolling upgrade only upon a change in that particular configmap or secret. This way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset`, `rollout`, `cronjob` or `job`.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations for [Configmap](.#Configmap) or [Secret](.#Secret).
It's also possible to enable auto reloading for all resources by setting the `--auto-reload-all` flag.
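Concretely, with the Job support added in this release, the auto annotation can sit on a Job just as it does on a Deployment. A minimal sketch, with illustrative resource names:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: foo-job  # illustrative name
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: app
          image: busybox
          envFrom:
            - configMapRef:
                name: foo-configmap  # a change here triggers recreation
```

A change to the data of `foo-configmap` would then cause Reloader to recreate the job, as implemented in `ReCreateJobFromjob` later in this diff.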
@@ -158,7 +158,7 @@ spec:
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
- For [`rollouts`](https://github.com/argoproj/argo-rollouts/), Reloader simply triggers a change; it is up to you how you configure the `rollout` strategy.
- `reloader.stakater.com/auto: "true"` will only reload the pod if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `reloader.stakater.com/auto: "true"` will only reload the pod if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets/CronJobs/Jobs`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of whether that configmap or secret is in use (see the sketch after this list)
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the secret typed auto annotation with the `--secret-auto-annotation` flag
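The named-resource variant from the notes above, pinned to a single configmap rather than relying on auto-discovery (again with illustrative names):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: bar-job  # illustrative name
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap"
    # secret.reloader.stakater.com/reload: "foo-secret" works analogously
```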
@@ -358,7 +358,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
| `reloader.deployment.labels` | Set deployment labels, default to stakater settings | array | `see values.yaml` |
| `reloader.deployment.labels` | Set deployment labels, default to Stakater settings | array | `see values.yaml` |
| `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
@@ -453,6 +453,7 @@ To make a GitHub release:
1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
1. Code owners create a PR to update the Helm chart version
_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
@@ -475,10 +476,10 @@ Apache2 © [Stakater][website]
## About
`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at <hello@stakater.com>
`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at [hello@stakater.com](mailto:hello@stakater.com)
See [our other projects](https://github.com/stakater)
or contact us in case of professional services and queries on <hello@stakater.com>
or contact us for professional services and queries at [hello@stakater.com](mailto:hello@stakater.com)
[website]: https://stakater.com

View File

@@ -1,10 +1,8 @@
# Generated from deployments/kubernetes/templates/chart/Chart.yaml.tmpl
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 1.2.2
appVersion: v1.2.1
version: 1.3.0
appVersion: v1.3.0
keywords:
- Reloader
- kubernetes
@@ -18,4 +16,4 @@ maintainers:
- name: rasheedamir
email: rasheed@stakater.com
- name: faizanahmad055
email: faizan.ahmad55@outlook.com
email: faizan@stakater.com

View File

@@ -89,6 +89,9 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"

View File

@@ -80,6 +80,9 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"

View File

@@ -96,11 +96,11 @@ reloader:
labels:
provider: stakater
group: com.stakater.platform
version: v1.2.1
version: v1.3.0
image:
name: ghcr.io/stakater/reloader
base: stakater/reloader
tag: v1.2.1
tag: v1.3.0
# digest: sha256:1234567
pullPolicy: IfNotPresent
# Support for extra environment variables.

View File

@@ -48,6 +48,9 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:

View File

@@ -52,6 +52,9 @@ rules:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:

View File

@@ -54,7 +54,7 @@ When Reloader detects changes in configmap. It gets two objects of configmap. Fi
After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulsets` and looks for the above-mentioned annotation for the configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.
### Environment Variable for ConfigMap
### Environment Variable for Configmap
If configmap name is foo then

View File

@@ -1,7 +1,6 @@
# Reloader vs ConfigmapController
Reloader is inspired by [Configmapcontroller](https://github.com/fabric8io/configmapcontroller), but there are many ways in which it differs from ConfigmapController. Below is a small comparison between the two controllers.
Reloader is inspired by [`Configmapcontroller`](https://github.com/fabric8io/configmapcontroller), but there are many ways in which it differs from `ConfigmapController`. Below is a small comparison between the two controllers.
| Reloader | ConfigmapController |
|----------|---------------------|

View File

@@ -3,12 +3,12 @@
Below are the steps to use Reloader with Sealed Secrets:
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets)
1. Install the controller for sealed secrets
1. Install the controller for Sealed Secrets
1. Fetch the encryption certificate
1. Encrypt the secret
1. Apply the secret
1. Install the tool which uses that sealed secret
1. Install the tool which uses that Sealed Secret
1. Install Reloader
1. Once everything is set up, update the original secret on the client and encrypt it with kubeseal to see Reloader working
1. Apply the updated sealed secret
1. Apply the updated Sealed Secret
1. Reloader will restart the pod to use that updated secret
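For reference, the object applied in the steps above is a `SealedSecret`; a minimal sketch with illustrative names and placeholder ciphertext:

```yaml
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: foo-secret
  namespace: default
spec:
  encryptedData:
    password: AgBy...  # ciphertext produced by kubeseal
```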

go.mod
View File

@@ -10,6 +10,7 @@ require (
github.com/prometheus/client_golang v1.20.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
k8s.io/api v0.31.1
k8s.io/apimachinery v0.31.1
k8s.io/client-go v0.31.1
@@ -47,6 +48,7 @@ require (
github.com/moul/http2curl v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect

go.sum
View File

@@ -178,8 +178,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

View File

@@ -92,6 +92,26 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
return items
}
// GetJobItems returns the jobs in given namespace
func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
jobs, err := clients.KubernetesClient.BatchV1().Jobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
if err != nil {
logrus.Errorf("Failed to list jobs %v", err)
}
items := make([]runtime.Object, len(jobs.Items))
// Ensure we always have pod annotations to add to
for i, v := range jobs.Items {
if v.Spec.Template.ObjectMeta.Annotations == nil {
annotations := make(map[string]string)
jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
}
items[i] = &jobs.Items[i]
}
return items
}
// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
@@ -178,6 +198,11 @@ func GetCronJobAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.CronJob).ObjectMeta.Annotations
}
// GetJobAnnotations returns the annotations of given job
func GetJobAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.Job).ObjectMeta.Annotations
}
// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
@@ -208,6 +233,11 @@ func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}
// GetJobPodAnnotations returns the pod's annotations of given job
func GetJobPodAnnotations(item runtime.Object) map[string]string {
return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
}
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
@@ -238,6 +268,11 @@ func GetCronJobContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers
}
// GetJobContainers returns the containers of given job
func GetJobContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.Job).Spec.Template.Spec.Containers
}
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
@@ -268,6 +303,11 @@ func GetCronJobInitContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers
}
// GetJobInitContainers returns the init containers of given job
func GetJobInitContainers(item runtime.Object) []v1.Container {
return item.(*batchv1.Job).Spec.Template.Spec.InitContainers
}
// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
@@ -307,6 +347,38 @@ func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runti
return err
}
// ReCreateJobFromjob recreates a job by deleting the existing job and creating a fresh copy of its spec
func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime.Object) error {
oldJob := resource.(*batchv1.Job)
job := oldJob.DeepCopy()
// Delete the old job
policy := meta_v1.DeletePropagationBackground
err := clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), job.Name, meta_v1.DeleteOptions{PropagationPolicy: &policy})
if err != nil {
return err
}
// Remove fields that should not be specified when creating a new Job
job.ObjectMeta.ResourceVersion = ""
job.ObjectMeta.UID = ""
job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
job.Status = batchv1.JobStatus{}
// Remove problematic labels
delete(job.Spec.Template.Labels, "controller-uid")
delete(job.Spec.Template.Labels, batchv1.ControllerUidLabel)
delete(job.Spec.Template.Labels, batchv1.JobNameLabel)
delete(job.Spec.Template.Labels, "job-name")
// Remove the selector to allow it to be auto-generated
job.Spec.Selector = nil
// Create the new job with same spec
_, err = clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
return err
}
// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
daemonSet := resource.(*appsv1.DaemonSet)
@@ -352,6 +424,11 @@ func GetCronJobVolumes(item runtime.Object) []v1.Volume {
return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes
}
// GetJobVolumes returns the Volumes of given job
func GetJobVolumes(item runtime.Object) []v1.Volume {
return item.(*batchv1.Job).Spec.Template.Spec.Volumes
}
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes

View File

@@ -2,13 +2,22 @@ package callbacks_test
import (
"context"
"fmt"
"testing"
"time"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
argorollouts "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
fakeargoclientset "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/options"
@@ -17,9 +26,32 @@ import (
)
var (
clients = kube.Clients{ArgoRolloutClient: argorollouts.NewSimpleClientset()}
clients = setupTestClients()
)
type testFixtures struct {
defaultContainers []v1.Container
defaultInitContainers []v1.Container
defaultVolumes []v1.Volume
namespace string
}
func newTestFixtures() testFixtures {
return testFixtures{
defaultContainers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
defaultInitContainers: []v1.Container{{Name: "init-container1"}, {Name: "init-container2"}},
defaultVolumes: []v1.Volume{{Name: "volume1"}, {Name: "volume2"}},
namespace: "default",
}
}
func setupTestClients() kube.Clients {
return kube.Clients{
KubernetesClient: fake.NewSimpleClientset(),
ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
}
}
// TestUpdateRollout test update rollout strategy annotation
func TestUpdateRollout(t *testing.T) {
namespace := "test-ns"
@@ -79,6 +111,218 @@ func TestUpdateRollout(t *testing.T) {
}
}
func TestResourceItems(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string) error
getItemsFunc func(kube.Clients, string) []runtime.Object
expectedCount int
}{
{
name: "Deployments",
createFunc: createTestDeployments,
getItemsFunc: callbacks.GetDeploymentItems,
expectedCount: 2,
},
{
name: "CronJobs",
createFunc: createTestCronJobs,
getItemsFunc: callbacks.GetCronJobItems,
expectedCount: 2,
},
{
name: "Jobs",
createFunc: createTestJobs,
getItemsFunc: callbacks.GetJobItems,
expectedCount: 2,
},
{
name: "DaemonSets",
createFunc: createTestDaemonSets,
getItemsFunc: callbacks.GetDaemonSetItems,
expectedCount: 2,
},
{
name: "StatefulSets",
createFunc: createTestStatefulSets,
getItemsFunc: callbacks.GetStatefulSetItems,
expectedCount: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.createFunc(clients, fixtures.namespace)
assert.NoError(t, err)
items := tt.getItemsFunc(clients, fixtures.namespace)
assert.Equal(t, tt.expectedCount, len(items))
})
}
}
func TestGetAnnotations(t *testing.T) {
testAnnotations := map[string]string{"version": "1"}
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) map[string]string
}{
{"Deployment", &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDeploymentAnnotations},
{"CronJob", &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetCronJobAnnotations},
{"Job", &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetJobAnnotations},
{"DaemonSet", &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetDaemonSetAnnotations},
{"StatefulSet", &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetStatefulSetAnnotations},
{"Rollout", &argorolloutv1alpha1.Rollout{ObjectMeta: metav1.ObjectMeta{Annotations: testAnnotations}}, callbacks.GetRolloutAnnotations},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
})
}
}
func TestGetPodAnnotations(t *testing.T) {
testAnnotations := map[string]string{"version": "1"}
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) map[string]string
}{
{"Deployment", createResourceWithPodAnnotations(&appsv1.Deployment{}, testAnnotations), callbacks.GetDeploymentPodAnnotations},
{"CronJob", createResourceWithPodAnnotations(&batchv1.CronJob{}, testAnnotations), callbacks.GetCronJobPodAnnotations},
{"Job", createResourceWithPodAnnotations(&batchv1.Job{}, testAnnotations), callbacks.GetJobPodAnnotations},
{"DaemonSet", createResourceWithPodAnnotations(&appsv1.DaemonSet{}, testAnnotations), callbacks.GetDaemonSetPodAnnotations},
{"StatefulSet", createResourceWithPodAnnotations(&appsv1.StatefulSet{}, testAnnotations), callbacks.GetStatefulSetPodAnnotations},
{"Rollout", createResourceWithPodAnnotations(&argorolloutv1alpha1.Rollout{}, testAnnotations), callbacks.GetRolloutPodAnnotations},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, testAnnotations, tt.getFunc(tt.resource))
})
}
}
func TestGetContainers(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Container
}{
{"Deployment", createResourceWithContainers(&appsv1.Deployment{}, fixtures.defaultContainers), callbacks.GetDeploymentContainers},
{"DaemonSet", createResourceWithContainers(&appsv1.DaemonSet{}, fixtures.defaultContainers), callbacks.GetDaemonSetContainers},
{"StatefulSet", createResourceWithContainers(&appsv1.StatefulSet{}, fixtures.defaultContainers), callbacks.GetStatefulSetContainers},
{"CronJob", createResourceWithContainers(&batchv1.CronJob{}, fixtures.defaultContainers), callbacks.GetCronJobContainers},
{"Job", createResourceWithContainers(&batchv1.Job{}, fixtures.defaultContainers), callbacks.GetJobContainers},
{"Rollout", createResourceWithContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultContainers), callbacks.GetRolloutContainers},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultContainers, tt.getFunc(tt.resource))
})
}
}
func TestGetInitContainers(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Container
}{
{"Deployment", createResourceWithInitContainers(&appsv1.Deployment{}, fixtures.defaultInitContainers), callbacks.GetDeploymentInitContainers},
{"DaemonSet", createResourceWithInitContainers(&appsv1.DaemonSet{}, fixtures.defaultInitContainers), callbacks.GetDaemonSetInitContainers},
{"StatefulSet", createResourceWithInitContainers(&appsv1.StatefulSet{}, fixtures.defaultInitContainers), callbacks.GetStatefulSetInitContainers},
{"CronJob", createResourceWithInitContainers(&batchv1.CronJob{}, fixtures.defaultInitContainers), callbacks.GetCronJobInitContainers},
{"Job", createResourceWithInitContainers(&batchv1.Job{}, fixtures.defaultInitContainers), callbacks.GetJobInitContainers},
{"Rollout", createResourceWithInitContainers(&argorolloutv1alpha1.Rollout{}, fixtures.defaultInitContainers), callbacks.GetRolloutInitContainers},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultInitContainers, tt.getFunc(tt.resource))
})
}
}
func TestUpdateResources(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
createFunc func(kube.Clients, string, string) (runtime.Object, error)
updateFunc func(kube.Clients, string, runtime.Object) error
}{
{"Deployment", createTestDeploymentWithAnnotations, callbacks.UpdateDeployment},
{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.UpdateDaemonSet},
{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.UpdateStatefulSet},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resource, err := tt.createFunc(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = tt.updateFunc(clients, fixtures.namespace, resource)
assert.NoError(t, err)
})
}
}
func TestCreateJobFromCronjob(t *testing.T) {
fixtures := newTestFixtures()
cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
assert.NoError(t, err)
}
func TestReCreateJobFromJob(t *testing.T) {
fixtures := newTestFixtures()
job, err := createTestJobWithAnnotations(clients, fixtures.namespace, "1")
assert.NoError(t, err)
err = callbacks.ReCreateJobFromjob(clients, fixtures.namespace, job.(*batchv1.Job))
assert.NoError(t, err)
}
func TestGetVolumes(t *testing.T) {
fixtures := newTestFixtures()
tests := []struct {
name string
resource runtime.Object
getFunc func(runtime.Object) []v1.Volume
}{
{"Deployment", createResourceWithVolumes(&appsv1.Deployment{}, fixtures.defaultVolumes), callbacks.GetDeploymentVolumes},
{"CronJob", createResourceWithVolumes(&batchv1.CronJob{}, fixtures.defaultVolumes), callbacks.GetCronJobVolumes},
{"Job", createResourceWithVolumes(&batchv1.Job{}, fixtures.defaultVolumes), callbacks.GetJobVolumes},
{"DaemonSet", createResourceWithVolumes(&appsv1.DaemonSet{}, fixtures.defaultVolumes), callbacks.GetDaemonSetVolumes},
{"StatefulSet", createResourceWithVolumes(&appsv1.StatefulSet{}, fixtures.defaultVolumes), callbacks.GetStatefulSetVolumes},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, fixtures.defaultVolumes, tt.getFunc(tt.resource))
})
}
}
// Helper functions
func isRestartStrategy(rollout *argorolloutv1alpha1.Rollout) bool {
return rollout.Spec.RestartAt == nil
}
@@ -103,3 +347,178 @@ func watchModified(watcher watch.Interface, name string, modifiedChan chan inter
}
}
}
func createTestDeployments(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDeployment(clients.KubernetesClient, fmt.Sprintf("test-deployment-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestCronJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateCronJob(clients.KubernetesClient, fmt.Sprintf("test-cron-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestJobs(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateJob(clients.KubernetesClient, fmt.Sprintf("test-job-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestDaemonSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateDaemonSet(clients.KubernetesClient, fmt.Sprintf("test-daemonset-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createTestStatefulSets(clients kube.Clients, namespace string) error {
for i := 1; i <= 2; i++ {
_, err := testutil.CreateStatefulSet(clients.KubernetesClient, fmt.Sprintf("test-statefulset-%d", i), namespace, false)
if err != nil {
return err
}
}
return nil
}
func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *appsv1.DaemonSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *appsv1.StatefulSet:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
case *batchv1.Job:
v.Spec.Template.ObjectMeta.Annotations = annotations
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.ObjectMeta.Annotations = annotations
}
return obj
}
func createResourceWithContainers(obj runtime.Object, containers []v1.Container) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.Containers = containers
case *appsv1.DaemonSet:
v.Spec.Template.Spec.Containers = containers
case *appsv1.StatefulSet:
v.Spec.Template.Spec.Containers = containers
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.Containers = containers
case *batchv1.Job:
v.Spec.Template.Spec.Containers = containers
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.Spec.Containers = containers
}
return obj
}
func createResourceWithInitContainers(obj runtime.Object, initContainers []v1.Container) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.InitContainers = initContainers
case *appsv1.DaemonSet:
v.Spec.Template.Spec.InitContainers = initContainers
case *appsv1.StatefulSet:
v.Spec.Template.Spec.InitContainers = initContainers
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.InitContainers = initContainers
case *batchv1.Job:
v.Spec.Template.Spec.InitContainers = initContainers
case *argorolloutv1alpha1.Rollout:
v.Spec.Template.Spec.InitContainers = initContainers
}
return obj
}
func createResourceWithVolumes(obj runtime.Object, volumes []v1.Volume) runtime.Object {
switch v := obj.(type) {
case *appsv1.Deployment:
v.Spec.Template.Spec.Volumes = volumes
case *batchv1.CronJob:
v.Spec.JobTemplate.Spec.Template.Spec.Volumes = volumes
case *batchv1.Job:
v.Spec.Template.Spec.Volumes = volumes
case *appsv1.DaemonSet:
v.Spec.Template.Spec.Volumes = volumes
case *appsv1.StatefulSet:
v.Spec.Template.Spec.Volumes = volumes
}
return obj
}
func createTestDeploymentWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-deployment",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
}
func createTestDaemonSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
daemonSet := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-daemonset",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), daemonSet, metav1.CreateOptions{})
}
func createTestStatefulSetWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-statefulset",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
}
func createTestCronJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
cronJob := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cronjob",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.BatchV1().CronJobs(namespace).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}
func createTestJobWithAnnotations(clients kube.Clients, namespace, version string) (runtime.Object, error) {
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "test-job",
Namespace: namespace,
Annotations: map[string]string{"version": version},
},
}
return clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{})
}

View File

@@ -55,6 +55,20 @@ func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
}
}
// GetJobCreateJobFuncs returns all callback funcs for a job
func GetJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
ItemsFunc: callbacks.GetJobItems,
AnnotationsFunc: callbacks.GetJobAnnotations,
PodAnnotationsFunc: callbacks.GetJobPodAnnotations,
ContainersFunc: callbacks.GetJobContainers,
InitContainersFunc: callbacks.GetJobInitContainers,
UpdateFunc: callbacks.ReCreateJobFromjob,
VolumesFunc: callbacks.GetJobVolumes,
ResourceType: "Job",
}
}
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
return callbacks.RollingUpgradeFuncs{
@@ -153,6 +167,10 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
if err != nil {
return err
}
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
if err != nil {
return err

View File

@@ -23,6 +23,7 @@ import (
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/kube"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -177,7 +178,7 @@ func getVolumes(name string) []v1.Volume {
}
}
func getVolumeMounts(name string) []v1.VolumeMount {
func getVolumeMounts() []v1.VolumeMount {
return []v1.VolumeMount{
{
MountPath: "etc/config",
@@ -275,7 +276,7 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
Value: "test",
},
},
VolumeMounts: getVolumeMounts(name),
VolumeMounts: getVolumeMounts(),
},
},
Volumes: getVolumes(name),
@@ -293,7 +294,7 @@ func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
{
Image: "busybox",
Name: "busyBox",
VolumeMounts: getVolumeMounts(name),
VolumeMounts: getVolumeMounts(),
},
},
Containers: []v1.Container{
@@ -637,6 +638,64 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret {
}
}
func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
return &batchv1.CronJob{
ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
Spec: batchv1.CronJobSpec{
Schedule: "*/5 * * * *", // Run every 5 minutes
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithVolumes(cronJobName),
},
},
},
}
}
func GetJob(namespace string, jobName string) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}),
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithVolumes(jobName),
},
}
}
func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {
return &batchv1.CronJob{
ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}),
Spec: batchv1.CronJobSpec{
Schedule: "*/5 * * * *", // Run every 5 minutes
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithEnvVars(cronJobName),
},
},
},
}
}
func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}),
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: getPodTemplateSpecWithEnvVars(jobName),
},
}
}
// GetSecretWithUpdatedLabel provides secret for testing
func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
return &v1.Secret{
@@ -847,6 +906,36 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
return statefulset, err
}
// CreateCronJob creates a cronjob in given namespace and returns the CronJob
func CreateCronJob(client kubernetes.Interface, cronJobName string, namespace string, volumeMount bool) (*batchv1.CronJob, error) {
logrus.Infof("Creating CronJob")
cronJobClient := client.BatchV1().CronJobs(namespace)
var cronJobObj *batchv1.CronJob
if volumeMount {
cronJobObj = GetCronJob(namespace, cronJobName)
} else {
cronJobObj = GetCronJobWithEnvVar(namespace, cronJobName)
}
cronJob, err := cronJobClient.Create(context.TODO(), cronJobObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return cronJob, err
}
// CreateJob creates a job in given namespace and returns the Job
func CreateJob(client kubernetes.Interface, jobName string, namespace string, volumeMount bool) (*batchv1.Job, error) {
logrus.Infof("Creating Job")
jobClient := client.BatchV1().Jobs(namespace)
var jobObj *batchv1.Job
if volumeMount {
jobObj = GetJob(namespace, jobName)
} else {
jobObj = GetJobWithEnvVar(namespace, jobName)
}
job, err := jobClient.Create(context.TODO(), jobObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return job, err
}
// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")