From fafd5460a21738df120e76aa375f9b0720805efc Mon Sep 17 00:00:00 2001 From: TheiLLeniumStudios <104288623+TheiLLeniumStudios@users.noreply.github.com> Date: Thu, 8 Jan 2026 11:06:45 +0100 Subject: [PATCH] feat: Initial e2e tests and migrate old ones into e2e --- Makefile | 61 +- go.mod | 8 +- internal/pkg/controller/controller_test.go | 2873 ++-------- internal/pkg/handler/create_test.go | 358 ++ internal/pkg/handler/delete_test.go | 356 ++ internal/pkg/handler/handlers_test.go | 288 + internal/pkg/handler/update_test.go | 530 ++ internal/pkg/handler/upgrade_test.go | 4910 +++-------------- internal/pkg/testutil/kube.go | 561 +- scripts/e2e-cluster-cleanup.sh | 93 + scripts/e2e-cluster-setup.sh | 80 + test/e2e/README.md | 457 ++ test/e2e/advanced/advanced_suite_test.go | 51 + test/e2e/advanced/job_reload_test.go | 187 + test/e2e/advanced/multi_container_test.go | 94 + test/e2e/advanced/pod_annotations_test.go | 191 + test/e2e/advanced/regex_test.go | 134 + .../e2e/annotations/annotations_suite_test.go | 59 + test/e2e/annotations/auto_reload_test.go | 269 + test/e2e/annotations/combination_test.go | 352 ++ test/e2e/annotations/exclude_test.go | 196 + test/e2e/annotations/pause_period_test.go | 102 + test/e2e/annotations/resource_ignore_test.go | 93 + test/e2e/annotations/search_match_test.go | 169 + test/e2e/argo/argo_suite_test.go | 66 + test/e2e/argo/rollout_test.go | 91 + test/e2e/core/core_suite_test.go | 89 + test/e2e/core/reference_methods_test.go | 528 ++ test/e2e/core/workloads_test.go | 912 +++ test/e2e/e2e_suite_test.go | 84 + test/e2e/flags/auto_reload_all_test.go | 106 + test/e2e/flags/flags_suite_test.go | 71 + test/e2e/flags/ignore_resources_test.go | 193 + test/e2e/flags/ignored_workloads_test.go | 159 + test/e2e/flags/namespace_ignore_test.go | 114 + test/e2e/flags/namespace_selector_test.go | 116 + test/e2e/flags/reload_on_create_test.go | 143 + test/e2e/flags/reload_on_delete_test.go | 154 + test/e2e/flags/resource_selector_test.go | 114 + 
test/e2e/flags/watch_globally_test.go | 170 + test/e2e/utils/annotations.go | 207 + test/e2e/utils/annotations_test.go | 306 + test/e2e/utils/argo.go | 308 ++ test/e2e/utils/helm.go | 224 + test/e2e/utils/helm_test.go | 157 + test/e2e/utils/kind.go | 27 + test/e2e/utils/openshift.go | 265 + test/e2e/utils/rand.go | 26 + test/e2e/utils/rand_test.go | 135 + test/e2e/utils/resources.go | 1094 ++++ test/e2e/utils/test_helpers.go | 12 + test/e2e/utils/test_helpers_test.go | 148 + test/e2e/utils/testenv.go | 154 + test/e2e/utils/utils.go | 114 + test/e2e/utils/wait.go | 498 ++ test/e2e/utils/workload_adapter.go | 160 + test/e2e/utils/workload_argo.go | 340 ++ test/e2e/utils/workload_cronjob.go | 223 + test/e2e/utils/workload_daemonset.go | 246 + test/e2e/utils/workload_deployment.go | 132 + test/e2e/utils/workload_job.go | 207 + test/e2e/utils/workload_openshift.go | 340 ++ test/e2e/utils/workload_statefulset.go | 246 + 63 files changed, 14035 insertions(+), 7116 deletions(-) create mode 100644 internal/pkg/handler/create_test.go create mode 100644 internal/pkg/handler/delete_test.go create mode 100644 internal/pkg/handler/handlers_test.go create mode 100644 internal/pkg/handler/update_test.go create mode 100644 scripts/e2e-cluster-cleanup.sh create mode 100644 scripts/e2e-cluster-setup.sh create mode 100644 test/e2e/README.md create mode 100644 test/e2e/advanced/advanced_suite_test.go create mode 100644 test/e2e/advanced/job_reload_test.go create mode 100644 test/e2e/advanced/multi_container_test.go create mode 100644 test/e2e/advanced/pod_annotations_test.go create mode 100644 test/e2e/advanced/regex_test.go create mode 100644 test/e2e/annotations/annotations_suite_test.go create mode 100644 test/e2e/annotations/auto_reload_test.go create mode 100644 test/e2e/annotations/combination_test.go create mode 100644 test/e2e/annotations/exclude_test.go create mode 100644 test/e2e/annotations/pause_period_test.go create mode 100644 test/e2e/annotations/resource_ignore_test.go 
create mode 100644 test/e2e/annotations/search_match_test.go create mode 100644 test/e2e/argo/argo_suite_test.go create mode 100644 test/e2e/argo/rollout_test.go create mode 100644 test/e2e/core/core_suite_test.go create mode 100644 test/e2e/core/reference_methods_test.go create mode 100644 test/e2e/core/workloads_test.go create mode 100644 test/e2e/e2e_suite_test.go create mode 100644 test/e2e/flags/auto_reload_all_test.go create mode 100644 test/e2e/flags/flags_suite_test.go create mode 100644 test/e2e/flags/ignore_resources_test.go create mode 100644 test/e2e/flags/ignored_workloads_test.go create mode 100644 test/e2e/flags/namespace_ignore_test.go create mode 100644 test/e2e/flags/namespace_selector_test.go create mode 100644 test/e2e/flags/reload_on_create_test.go create mode 100644 test/e2e/flags/reload_on_delete_test.go create mode 100644 test/e2e/flags/resource_selector_test.go create mode 100644 test/e2e/flags/watch_globally_test.go create mode 100644 test/e2e/utils/annotations.go create mode 100644 test/e2e/utils/annotations_test.go create mode 100644 test/e2e/utils/argo.go create mode 100644 test/e2e/utils/helm.go create mode 100644 test/e2e/utils/helm_test.go create mode 100644 test/e2e/utils/kind.go create mode 100644 test/e2e/utils/openshift.go create mode 100644 test/e2e/utils/rand.go create mode 100644 test/e2e/utils/rand_test.go create mode 100644 test/e2e/utils/resources.go create mode 100644 test/e2e/utils/test_helpers.go create mode 100644 test/e2e/utils/test_helpers_test.go create mode 100644 test/e2e/utils/testenv.go create mode 100644 test/e2e/utils/utils.go create mode 100644 test/e2e/utils/wait.go create mode 100644 test/e2e/utils/workload_adapter.go create mode 100644 test/e2e/utils/workload_argo.go create mode 100644 test/e2e/utils/workload_cronjob.go create mode 100644 test/e2e/utils/workload_daemonset.go create mode 100644 test/e2e/utils/workload_deployment.go create mode 100644 test/e2e/utils/workload_job.go create mode 100644 
test/e2e/utils/workload_openshift.go create mode 100644 test/e2e/utils/workload_statefulset.go diff --git a/Makefile b/Makefile index 8444e1f..3c15d05 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,9 @@ DOCKER_IMAGE ?= ghcr.io/stakater/reloader # Default value "dev" VERSION ?= 0.0.1 +# Full image reference (used for docker-build) +IMG ?= $(DOCKER_IMAGE):v$(VERSION) + REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION} REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH} BUILD= @@ -140,7 +143,63 @@ manifest: docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH) test: - "$(GOCMD)" test -timeout 1800s -v ./... + "$(GOCMD)" test -timeout 1800s -v -short ./internal/... ./test/e2e/utils/... + +##@ E2E Tests + +E2E_IMG ?= ghcr.io/stakater/reloader:test +E2E_TIMEOUT ?= 45m +KIND_CLUSTER ?= kind + +# Detect container runtime (docker or podman) +CONTAINER_RUNTIME ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null) + +.PHONY: e2e-build +e2e-build: ## Build container image for e2e testing (uses docker or podman) + $(CONTAINER_RUNTIME) build -t $(E2E_IMG) -f Dockerfile . + +.PHONY: e2e-load +e2e-load: ## Load e2e image to Kind cluster (handles both docker and podman) +ifeq ($(notdir $(CONTAINER_RUNTIME)),podman) + @echo "Using podman: loading via image-archive..." + $(CONTAINER_RUNTIME) save $(E2E_IMG) -o /tmp/reloader-e2e.tar + kind load image-archive /tmp/reloader-e2e.tar --name $(KIND_CLUSTER) + rm -f /tmp/reloader-e2e.tar +else + kind load docker-image $(E2E_IMG) --name $(KIND_CLUSTER) +endif + +.PHONY: e2e-setup +e2e-setup: e2e-build e2e-load ## Build image and load to Kind (run once before tests) + @echo "E2E setup complete. Image $(E2E_IMG) loaded to Kind cluster $(KIND_CLUSTER)" + +.PHONY: e2e-cluster-setup +e2e-cluster-setup: ## Setup e2e cluster prerequisites (Argo Rollouts, etc.) 
+ ./scripts/e2e-cluster-setup.sh + +.PHONY: e2e-cluster-cleanup +e2e-cluster-cleanup: ## Cleanup e2e cluster resources (Argo Rollouts, test namespaces, etc.) + ./scripts/e2e-cluster-cleanup.sh + +.PHONY: e2e +e2e: e2e-setup e2e-cluster-setup ## Run all e2e tests (builds image, loads to Kind, sets up cluster, runs tests) + SKIP_BUILD=true RELOADER_IMAGE=$(E2E_IMG) "$(GOCMD)" test -v -count=1 -p 1 -timeout $(E2E_TIMEOUT) ./test/e2e/... + @echo "E2E tests complete. Run 'make e2e-cluster-cleanup' to cleanup cluster resources." + +.PHONY: e2e-kind-create +e2e-kind-create: ## Create Kind cluster for e2e tests + kind create cluster --name $(KIND_CLUSTER) || true + +.PHONY: e2e-ci +e2e-ci: e2e-kind-create e2e e2e-cluster-cleanup ## Full CI pipeline: create Kind cluster, build, load, run tests, cleanup + +.PHONY: e2e-kind-delete +e2e-kind-delete: ## Delete Kind cluster used for e2e tests + kind delete cluster --name $(KIND_CLUSTER) + +.PHONY: docker-build +docker-build: ## Build Docker image + $(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile . 
stop: @docker stop "${BINARY}" diff --git a/go.mod b/go.mod index 05edecc..5417a61 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,8 @@ go 1.25.5 require ( github.com/argoproj/argo-rollouts v1.8.3 - github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 + github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/gomega v1.35.1 github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 github.com/parnurzeal/gorequest v0.3.0 github.com/prometheus/client_golang v1.22.0 @@ -29,21 +30,23 @@ require ( github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/moul/http2curl v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.2 // indirect @@ -58,6 +61,7 @@ require ( golang.org/x/term v0.31.0 // indirect golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.26.0 // indirect 
google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/internal/pkg/controller/controller_test.go b/internal/pkg/controller/controller_test.go index 63e6be3..250dd1f 100644 --- a/internal/pkg/controller/controller_test.go +++ b/internal/pkg/controller/controller_test.go @@ -1,2365 +1,656 @@ package controller import ( - "context" - "os" "testing" - "time" - "github.com/stakater/Reloader/internal/pkg/constants" - - "github.com/stakater/Reloader/internal/pkg/metrics" - - "github.com/sirupsen/logrus" "github.com/stakater/Reloader/internal/pkg/handler" + "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" - "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" + "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) -var ( - clients = kube.GetClients() - namespace = "test-reloader-" + testutil.RandSeq(5) - configmapNamePrefix = "testconfigmap-reloader" - secretNamePrefix = "testsecret-reloader" - data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - newData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - collectors = metrics.NewCollectors() -) - -const ( - sleepDuration = 3 * time.Second -) - -func TestMain(m *testing.M) { - - testutil.CreateNamespace(namespace, clients.KubernetesClient) - - logrus.Infof("Creating controller") - for k := range kube.ResourceMap { - if k == "namespaces" { - continue - } - c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors) - if err != nil { - logrus.Fatalf("%s", err) 
- } - - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - go c.Run(1, stop) - } - time.Sleep(sleepDuration) - - logrus.Infof("Running Testcases") - retCode := m.Run() - - testutil.DeleteNamespace(namespace, clients.KubernetesClient) - - os.Exit(retCode) +// resetGlobalState resets global variables between tests +func resetGlobalState() { + secretControllerInitialized = false + configmapControllerInitialized = false + selectedNamespacesCache = []string{} } -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy +// newTestController creates a controller for testing without starting informers +func newTestController(ignoredNamespaces []string, namespaceSelector string) *Controller { + queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[any]()) + collectors := metrics.NewCollectors() - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) + return &Controller{ + queue: queue, + ignoredNamespaces: ignoredNamespaces, + namespaceSelector: namespaceSelector, + collectors: collectors, + resource: "configmaps", } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - 
logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) } -// Perform rolling upgrade on deployment and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying 
deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create pod annotation var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, 
namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err 
= testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create pod annotation var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + 
"-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon creating the secret -func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // TODO: Fix this test case - 
t.Skip("Skipping TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod 
annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy 
= constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a pod annotation upon updating the label in secret -func 
TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create pod annotation var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.T) { - options.ReloadStrategy = 
constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting 
the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update pod annotation var upon updating the secret -func 
TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or 
update a pod annotation upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create pod annotation var upon updating the configmap -func 
TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon 
updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = 
testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret 
%v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the 
configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, false) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error 
while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and create env var upon creating the configmap -func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating configmap - configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Deleting configmap for first time - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - time.Sleep(sleepDuration) - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com") - if err != nil { - t.Errorf("Error while creating the configmap second time %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - 
time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - - 
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap -func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "test", "www.google.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying deployment update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: 
options.ConfigmapUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label") - } - time.Sleep(sleepDuration) - - // Deleting deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon creating the secret -func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // TODO: Fix this test case - t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case") - - // Creating secret - secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5) - _, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) - - _, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - time.Sleep(sleepDuration) - - // Verifying Upgrade - 
logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - time.Sleep(sleepDuration) - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, 
secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on deployment and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := 
testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating deployment - _, err = testutil.CreateDeployment(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in deployment creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data) - config := common.Config{ - Namespace: 
namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment should not be updated by changing label in secret") - } - - // Deleting Deployment - err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the deployment %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - config := common.Config{ - Namespace: namespace, - 
ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - time.Sleep(sleepDuration) - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - 
time.Sleep(sleepDuration) - - // Verifying DaemonSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been 
created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on DaemonSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - time.Sleep(sleepDuration) - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - 
logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret -func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating DaemonSet - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in DaemonSet creation: %v", err) - } - - err = testutil.UpdateSecret(secretClient, namespace, secretName, "test", data) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, 
data) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - daemonSetFuncs := handler.GetDaemonSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs) - if updated { - t.Errorf("DaemonSet should not be updated by changing label in secret") - } - - // Deleting DaemonSet - err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the DaemonSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap -func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating configmap - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com") - 
config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap -func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5) - configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com") - if err != nil { - t.Errorf("Error while creating the configmap %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating configmap for first time - updateErr := testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "www.stakater.com") - if updateErr != nil { - t.Errorf("Configmap was not updated") - } - - // Updating configmap for second time - updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io") - if updateErr != nil { - 
t.Errorf("Configmap was not updated") - } - - // Verifying StatefulSet update - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io") - config := common.Config{ - Namespace: namespace, - ResourceName: configmapName, - SHAValue: shaData, - Annotation: options.ConfigmapUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - time.Sleep(sleepDuration) - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - // Deleting configmap - err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on pod and create a env var upon updating the secret -func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - 
logrus.Infof("Verifying env var has been created") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update env var upon updating the secret -func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - if err != nil { - t.Errorf("Error while updating secret %v", 
err) - } - - // Verifying Upgrade - logrus.Infof("Verifying env var has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -// Perform rolling upgrade on StatefulSet and update pod annotation var upon updating the secret -func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - // Creating secret - secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5) - secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data) - if err != nil { - t.Errorf("Error in secret creation: %v", err) - } - - // Creating StatefulSet - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, secretName, namespace, true) - if err != nil { - t.Errorf("Error in StatefulSet creation: %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", newData) - if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Updating Secret - err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData) - 
if err != nil { - t.Errorf("Error while updating secret %v", err) - } - - // Verifying Upgrade - logrus.Infof("Verifying pod annotation has been updated") - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData) - config := common.Config{ - Namespace: namespace, - ResourceName: secretName, - SHAValue: shaData, - Annotation: options.SecretUpdateOnChangeAnnotation, - } - statefulSetFuncs := handler.GetStatefulSetRollingUpgradeFuncs() - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - // Deleting StatefulSet - err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the StatefulSet %v", err) - } - - //Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, namespace, secretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - time.Sleep(sleepDuration) -} - -func TestController_resourceInIgnoredNamespace(t *testing.T) { - type fields struct { - client kubernetes.Interface - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace string - ignoredNamespaces util.List - } - type args struct { - raw interface{} - } +func TestResourceInIgnoredNamespace(t *testing.T) { tests := []struct { - name string - fields fields - args args - want bool + name string + ignoredNamespaces []string + resource interface{} + expected bool }{ { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "ConfigMap in ignored namespace", + ignoredNamespaces: []string{"kube-system", "default"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", }, }, - args: args{ - raw: testutil.GetConfigmap("system", "testcm", "test"), - }, - 
want: true, + expected: true, }, { - name: "TestSecretResourceInIgnoredNamespaceShouldReturnTrue", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "ConfigMap not in ignored namespace", + ignoredNamespaces: []string{"kube-system", "default"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "my-namespace", }, }, - args: args{ - raw: testutil.GetSecret("system", "testsecret", "test"), - }, - want: true, + expected: false, }, { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "Secret in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "kube-system", }, }, - args: args{ - raw: testutil.GetConfigmap("some-other-namespace", "testcm", "test"), - }, - want: false, + expected: true, }, { - name: "TestConfigMapResourceInIgnoredNamespaceShouldReturnFalse", - fields: fields{ - ignoredNamespaces: util.List{ - "system", + name: "Secret not in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "my-namespace", }, }, - args: args{ - raw: testutil.GetSecret("some-other-namespace", "testsecret", "test"), + expected: false, + }, + { + name: "Empty ignored namespaces list", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "any-namespace", + }, }, - want: false, + expected: false, + }, + { + name: "Unknown resource type", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.Pod{}, // Not a ConfigMap or Secret + expected: false, }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &Controller{ - client: tt.fields.client, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: tt.fields.informer, - 
namespace: tt.fields.namespace, - ignoredNamespaces: tt.fields.ignoredNamespaces, - } - if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want) - } + c := newTestController(tt.ignoredNamespaces, "") + result := c.resourceInIgnoredNamespace(tt.resource) + assert.Equal(t, tt.expected, result) }) } } -func TestController_resourceInNamespaceSelector(t *testing.T) { - type fields struct { - indexer cache.Indexer - queue workqueue.TypedRateLimitingInterface[any] - informer cache.Controller - namespace v1.Namespace +func TestResourceInSelectedNamespaces(t *testing.T) { + tests := []struct { + name string namespaceSelector string - } - type args struct { - raw interface{} - } - tests := []struct { - name string - fields fields - args args - want bool + cachedNamespaces []string + resource interface{} + expected bool }{ { - name: "TestConfigMapResourceInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, + name: "No namespace selector - all namespaces allowed", + namespaceSelector: "", + cachedNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "any-namespace", }, }, - args: args{ - raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"), - }, - want: true, - }, { - name: "TestConfigMapResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, - }, - }, - args: args{ - raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"), - }, - want: false, + expected: true, }, { - name: "TestSecretResourceInNamespaceSelector", - 
fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - }, - }, + name: "ConfigMap in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns", "staging-ns"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "prod-ns", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "testsecret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceNotInNamespaceSelector", - fields: fields{ - namespaceSelector: "select=this,select2=this2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-selected-namespace", - Labels: map[string]string{}, - }, + expected: true, + }, + { + name: "ConfigMap not in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", }, }, - args: args{ - raw: testutil.GetSecret("not-selected-namespace", "secret", "test"), - }, - want: false, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyExists", - fields: fields{ - namespaceSelector: "select", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, + expected: false, + }, + { + name: "Secret in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "prod-ns", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorValueIn", - fields: fields{ - namespaceSelector: "select in (select1, select2, select3)", - namespace: v1.Namespace{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "select2", - }, - }, + expected: true, + }, + { + name: "Secret not in selected namespace", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "dev-ns", }, }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist", - fields: fields{ - namespaceSelector: "!select2", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, - }, { - name: "TestSecretResourceInNamespaceSelectorMultipleConditions", - fields: fields{ - namespaceSelector: "select,select2=this2,select3!=this4", - namespace: v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "selected-namespace", - Labels: map[string]string{ - "select": "this", - "select2": "this2", - "select3": "this3", - }, - }, - }, - }, - args: args{ - raw: testutil.GetSecret("selected-namespace", "secret", "test"), - }, - want: true, + expected: false, + }, + { + name: "Unknown resource type with selector", + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + resource: &v1.Pod{}, + expected: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fakeClient := fake.NewSimpleClientset() - namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{}) - logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name) + resetGlobalState() + selectedNamespacesCache = tt.cachedNamespaces - c := &Controller{ - client: fakeClient, - indexer: tt.fields.indexer, - queue: tt.fields.queue, - informer: 
tt.fields.informer, - namespace: tt.fields.namespace.Name, - namespaceSelector: tt.fields.namespaceSelector, + c := newTestController([]string{}, tt.namespaceSelector) + result := c.resourceInSelectedNamespaces(tt.resource) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestAddSelectedNamespaceToCache(t *testing.T) { + resetGlobalState() + + c := newTestController([]string{}, "env=prod") + + // Add first namespace + ns1 := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "namespace-1"}, + } + c.addSelectedNamespaceToCache(ns1) + assert.Contains(t, selectedNamespacesCache, "namespace-1") + assert.Len(t, selectedNamespacesCache, 1) + + // Add second namespace + ns2 := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "namespace-2"}, + } + c.addSelectedNamespaceToCache(ns2) + assert.Contains(t, selectedNamespacesCache, "namespace-1") + assert.Contains(t, selectedNamespacesCache, "namespace-2") + assert.Len(t, selectedNamespacesCache, 2) +} + +func TestRemoveSelectedNamespaceFromCache(t *testing.T) { + tests := []struct { + name string + initialCache []string + namespaceToRemove string + expectedCache []string + }{ + { + name: "Remove existing namespace", + initialCache: []string{"ns-1", "ns-2", "ns-3"}, + namespaceToRemove: "ns-2", + expectedCache: []string{"ns-1", "ns-3"}, + }, + { + name: "Remove non-existing namespace", + initialCache: []string{"ns-1", "ns-2"}, + namespaceToRemove: "ns-3", + expectedCache: []string{"ns-1", "ns-2"}, + }, + { + name: "Remove from empty cache", + initialCache: []string{}, + namespaceToRemove: "ns-1", + expectedCache: []string{}, + }, + { + name: "Remove only namespace", + initialCache: []string{"ns-1"}, + namespaceToRemove: "ns-1", + expectedCache: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetGlobalState() + selectedNamespacesCache = tt.initialCache + + c := newTestController([]string{}, "env=prod") + ns := v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: 
tt.namespaceToRemove}, } + c.removeSelectedNamespaceFromCache(ns) - listOptions := metav1.ListOptions{} - listOptions.LabelSelector = tt.fields.namespaceSelector - namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions) + assert.Equal(t, tt.expectedCache, selectedNamespacesCache) + }) + } +} - for _, ns := range namespaces.Items { - c.addSelectedNamespaceToCache(ns) - } +func TestAddHandler(t *testing.T) { + tests := []struct { + name string + reloadOnCreate string + ignoredNamespaces []string + resource interface{} + controllersInit bool + expectQueueItem bool + }{ + { + name: "Namespace resource - should not queue", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, + }, + controllersInit: true, + expectQueueItem: false, + }, + { + name: "ReloadOnCreate disabled", + reloadOnCreate: "false", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: true, + expectQueueItem: false, + }, + { + name: "ConfigMap in ignored namespace", + reloadOnCreate: "true", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + }, + controllersInit: true, + expectQueueItem: false, + }, + { + name: "Controllers not initialized", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: false, + expectQueueItem: false, + }, + { + name: "Valid ConfigMap - should queue", + reloadOnCreate: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: true, + expectQueueItem: true, + }, + } - if got := 
c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want { - t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetGlobalState() + options.ReloadOnCreate = tt.reloadOnCreate + secretControllerInitialized = tt.controllersInit + configmapControllerInitialized = tt.controllersInit - for _, ns := range namespaces.Items { - c.removeSelectedNamespaceFromCache(ns) + c := newTestController(tt.ignoredNamespaces, "") + c.Add(tt.resource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") } }) } } + +func TestUpdateHandler(t *testing.T) { + tests := []struct { + name string + ignoredNamespaces []string + namespaceSelector string + cachedNamespaces []string + oldResource interface{} + newResource interface{} + expectQueueItem bool + }{ + { + name: "Namespace resource - should not queue", + ignoredNamespaces: []string{}, + oldResource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, + }, + newResource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, + }, + expectQueueItem: false, + }, + { + name: "ConfigMap in ignored namespace", + ignoredNamespaces: []string{"kube-system"}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + }, + expectQueueItem: false, + }, + { + name: "ConfigMap not in selected namespace", + ignoredNamespaces: []string{}, + namespaceSelector: "env=prod", + cachedNamespaces: []string{"prod-ns"}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", + }, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "dev-ns", + }, + 
}, + expectQueueItem: false, + }, + { + name: "Valid ConfigMap update - should queue", + ignoredNamespaces: []string{}, + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "old-value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "new-value"}, + }, + expectQueueItem: true, + }, + { + name: "Valid Secret update - should queue", + ignoredNamespaces: []string{}, + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + }, + expectQueueItem: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetGlobalState() + if tt.cachedNamespaces != nil { + selectedNamespacesCache = tt.cachedNamespaces + } + + c := newTestController(tt.ignoredNamespaces, tt.namespaceSelector) + c.Update(tt.oldResource, tt.newResource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + // Verify the queued item is the correct type + item, _ := c.queue.Get() + _, ok := item.(handler.ResourceUpdatedHandler) + assert.True(t, ok, "Expected ResourceUpdatedHandler in queue") + c.queue.Done(item) + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") + } + }) + } +} + +func TestDeleteHandler(t *testing.T) { + tests := []struct { + name string + reloadOnDelete string + ignoredNamespaces []string + resource interface{} + controllersInit bool + expectQueueItem bool + }{ + { + name: "ReloadOnDelete disabled", + reloadOnDelete: "false", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: true, + expectQueueItem: false, + }, + { + 
name: "ConfigMap in ignored namespace", + reloadOnDelete: "true", + ignoredNamespaces: []string{"kube-system"}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "kube-system", + }, + }, + controllersInit: true, + expectQueueItem: false, + }, + { + name: "Controllers not initialized", + reloadOnDelete: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: false, + expectQueueItem: false, + }, + { + name: "Valid ConfigMap delete - should queue", + reloadOnDelete: "true", + ignoredNamespaces: []string{}, + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + }, + controllersInit: true, + expectQueueItem: true, + }, + { + name: "Namespace delete - updates cache", + reloadOnDelete: "false", // Disable to test cache update only + ignoredNamespaces: []string{}, + resource: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}, + }, + controllersInit: true, + expectQueueItem: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetGlobalState() + options.ReloadOnDelete = tt.reloadOnDelete + secretControllerInitialized = tt.controllersInit + configmapControllerInitialized = tt.controllersInit + + c := newTestController(tt.ignoredNamespaces, "") + c.Delete(tt.resource) + + if tt.expectQueueItem { + assert.Equal(t, 1, c.queue.Len(), "Expected queue to have 1 item") + // Verify the queued item is the correct type + item, _ := c.queue.Get() + _, ok := item.(handler.ResourceDeleteHandler) + assert.True(t, ok, "Expected ResourceDeleteHandler in queue") + c.queue.Done(item) + } else { + assert.Equal(t, 0, c.queue.Len(), "Expected queue to be empty") + } + }) + } +} + +func TestHandleErr(t *testing.T) { + t.Run("No error - should forget key", func(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") 
+ + key := "test-key" + // Add key to queue first + c.queue.Add(key) + item, _ := c.queue.Get() + + // Handle with no error + c.handleErr(nil, item) + c.queue.Done(item) + + // Key should be forgotten (NumRequeues should be 0) + assert.Equal(t, 0, c.queue.NumRequeues(key)) + }) + + t.Run("Error at max retries - should drop key", func(t *testing.T) { + resetGlobalState() + c := newTestController([]string{}, "") + + key := "test-key-max" + + // Simulate 5 previous failures (max retries) + for range 5 { + c.queue.AddRateLimited(key) + } + + // After max retries, handleErr should forget the key + c.handleErr(assert.AnError, key) + + // Key should be forgotten + assert.Equal(t, 0, c.queue.NumRequeues(key)) + }) +} + +func TestAddHandlerWithNamespaceEvent(t *testing.T) { + resetGlobalState() + + c := newTestController([]string{}, "env=prod") + + // When a namespace is added, it should be cached + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "new-namespace"}, + } + + c.Add(ns) + + assert.Contains(t, selectedNamespacesCache, "new-namespace") + assert.Equal(t, 0, c.queue.Len(), "Namespace add should not queue anything") +} + +func TestDeleteHandlerWithNamespaceEvent(t *testing.T) { + resetGlobalState() + selectedNamespacesCache = []string{"ns-1", "ns-to-delete", "ns-2"} + + c := newTestController([]string{}, "env=prod") + options.ReloadOnDelete = "true" + secretControllerInitialized = true + configmapControllerInitialized = true + + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "ns-to-delete"}, + } + + c.Delete(ns) + + assert.NotContains(t, selectedNamespacesCache, "ns-to-delete") + assert.Contains(t, selectedNamespacesCache, "ns-1") + assert.Contains(t, selectedNamespacesCache, "ns-2") + assert.Equal(t, 0, c.queue.Len(), "Namespace delete should not queue anything") +} diff --git a/internal/pkg/handler/create_test.go b/internal/pkg/handler/create_test.go new file mode 100644 index 0000000..454e796 --- /dev/null +++ 
b/internal/pkg/handler/create_test.go @@ -0,0 +1,358 @@ +package handler + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestResourceCreatedHandler_GetConfig(t *testing.T) { + tests := []struct { + name string + resource interface{} + expectedName string + expectedNS string + expectedType string + expectSHANotEmpty bool + expectOldSHAEmpty bool + }{ + { + name: "ConfigMap with data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-configmap", + Namespace: "test-ns", + }, + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + expectedName: "my-configmap", + expectedNS: "test-ns", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with empty data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-configmap", + Namespace: "default", + }, + Data: map[string]string{}, + }, + expectedName: "empty-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with binary data", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binary-configmap", + Namespace: "default", + }, + BinaryData: map[string][]byte{ + "binary-key": []byte("binary-value"), + }, + }, + expectedName: "binary-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "ConfigMap with annotations", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "annotated-configmap", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/match": "true", + }, + }, + Data: 
map[string]string{"key": "value"}, + }, + expectedName: "annotated-configmap", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with data", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "secret-ns", + }, + Data: map[string][]byte{ + "password": []byte("secret-password"), + }, + }, + expectedName: "my-secret", + expectedNS: "secret-ns", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with empty data", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-secret", + Namespace: "default", + }, + Data: map[string][]byte{}, + }, + expectedName: "empty-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with StringData", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stringdata-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "username": "admin", + }, + }, + expectedName: "stringdata-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Secret with labels", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "labeled-secret", + Namespace: "default", + Labels: map[string]string{ + "app": "test", + }, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectedName: "labeled-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - string", + resource: "invalid-string", + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - int", + resource: 123, + 
expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + { + name: "Invalid resource type - struct", + resource: struct{ Name string }{Name: "test"}, + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectOldSHAEmpty: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: tt.resource, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, tt.expectedName, config.ResourceName) + assert.Equal(t, tt.expectedNS, config.Namespace) + assert.Equal(t, tt.expectedType, config.Type) + + if tt.expectSHANotEmpty { + assert.NotEmpty(t, config.SHAValue, "SHA should not be empty") + } + + if tt.expectOldSHAEmpty { + assert.Empty(t, oldSHA, "oldSHA should always be empty for create handler") + } + }) + } +} + +func TestResourceCreatedHandler_GetConfig_Annotations(t *testing.T) { + // Test that annotations are properly captured in config + cm := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "annotated-cm", + Namespace: "default", + Annotations: map[string]string{ + "reloader.stakater.com/match": "true", + "reloader.stakater.com/search": "true", + }, + }, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceCreatedHandler{ + Resource: cm, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.NotNil(t, config.ResourceAnnotations) + assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/match"]) + assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/search"]) +} + +func TestResourceCreatedHandler_GetConfig_Labels(t *testing.T) { + // Test that labels are properly captured in config + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "labeled-secret", + Namespace: "default", + Labels: map[string]string{ + "app": "myapp", + "version": "v1", + }, + 
}, + Data: map[string][]byte{"key": []byte("value")}, + } + + handler := ResourceCreatedHandler{ + Resource: secret, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + assert.NotNil(t, config.Labels) + assert.Equal(t, "myapp", config.Labels["app"]) + assert.Equal(t, "v1", config.Labels["version"]) +} + +func TestResourceCreatedHandler_Handle(t *testing.T) { + tests := []struct { + name string + resource interface{} + expectError bool + }{ + { + name: "Nil resource", + resource: nil, + expectError: false, // logs error but returns nil + }, + { + name: "Valid ConfigMap - no workloads to update", + resource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Data: map[string]string{"key": "value"}, + }, + expectError: false, + }, + { + name: "Valid Secret - no workloads to update", + resource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: tt.resource, + Collectors: metrics.NewCollectors(), + } + + err := handler.Handle() + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResourceCreatedHandler_SHAConsistency(t *testing.T) { + // Test that same data produces same SHA + data := map[string]string{"key": "value"} + + cm1 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "default"}, + Data: data, + } + cm2 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "default"}, + Data: data, + } + + handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()} + handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()} + + config1, _ := handler1.GetConfig() + config2, _ := handler2.GetConfig() + 
+ // Same data should produce same SHA + assert.Equal(t, config1.SHAValue, config2.SHAValue) +} + +func TestResourceCreatedHandler_SHADifference(t *testing.T) { + // Test that different data produces different SHA + cm1 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value1"}, + } + cm2 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value2"}, + } + + handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()} + handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()} + + config1, _ := handler1.GetConfig() + config2, _ := handler2.GetConfig() + + // Different data should produce different SHA + assert.NotEqual(t, config1.SHAValue, config2.SHAValue) +} diff --git a/internal/pkg/handler/delete_test.go b/internal/pkg/handler/delete_test.go new file mode 100644 index 0000000..a5fbb59 --- /dev/null +++ b/internal/pkg/handler/delete_test.go @@ -0,0 +1,356 @@ +package handler + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/callbacks" + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/options" + "github.com/stakater/Reloader/pkg/common" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// mockDeploymentForDelete creates a deployment with containers for testing delete strategies +func mockDeploymentForDelete(name, namespace string, containers []v1.Container, volumes []v1.Volume) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + Spec: v1.PodSpec{ + Containers: 
containers, + Volumes: volumes, + }, + }, + }, + } +} + +// Mock funcs for testing +func mockContainersFunc(item runtime.Object) []v1.Container { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.Containers +} + +func mockInitContainersFunc(item runtime.Object) []v1.Container { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.InitContainers +} + +func mockVolumesFunc(item runtime.Object) []v1.Volume { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Spec.Volumes +} + +func mockPodAnnotationsFunc(item runtime.Object) map[string]string { + deployment, ok := item.(*appsv1.Deployment) + if !ok { + return nil + } + return deployment.Spec.Template.Annotations +} + +func mockPatchTemplatesFunc() callbacks.PatchTemplates { + return callbacks.PatchTemplates{ + AnnotationTemplate: `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`, + EnvVarTemplate: `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`, + DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`, + } +} + +func TestRemoveContainerEnvVars(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + volumes []v1.Volume + config common.Config + autoReload bool + expected constants.Result + envVarRemoved bool + }{ + { + name: "Remove existing env var - configmap envFrom", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + 
autoReload: true, + expected: constants.Updated, + envVarRemoved: true, + }, + { + name: "No env var to remove", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{}, // No env vars + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expected: constants.NotUpdated, + envVarRemoved: false, + }, + { + name: "Remove existing env var - secret envFrom", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_SECRET_SECRET", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-secret", + Type: constants.SecretEnvVarPostfix, + }, + autoReload: true, + expected: constants.Updated, + envVarRemoved: true, + }, + { + name: "No container found", + containers: []v1.Container{}, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + autoReload: true, + expected: constants.NoContainerFound, + envVarRemoved: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: true, + } + + result := removeContainerEnvVars(funcs, deployment, tt.config, tt.autoReload) + + assert.Equal(t, tt.expected, 
result.Result) + + if tt.envVarRemoved { + // Verify env var was removed from container + containers := deployment.Spec.Template.Spec.Containers + for _, c := range containers { + for _, env := range c.Env { + envVarName := getEnvVarName(tt.config.ResourceName, tt.config.Type) + assert.NotEqual(t, envVarName, env.Name, "Env var should have been removed") + } + } + } + }) + } +} + +func TestInvokeDeleteStrategy(t *testing.T) { + // Save original strategy and restore after test + originalStrategy := options.ReloadStrategy + defer func() { + options.ReloadStrategy = originalStrategy + }() + + tests := []struct { + name string + reloadStrategy string + containers []v1.Container + volumes []v1.Volume + config common.Config + }{ + { + name: "Annotations strategy", + reloadStrategy: constants.AnnotationsReloadStrategy, + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + SHAValue: "sha-value", + }, + }, + { + name: "EnvVars strategy", + reloadStrategy: constants.EnvVarsReloadStrategy, + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + Env: []v1.EnvVar{ + {Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"}, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + options.ReloadStrategy = tt.reloadStrategy + + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := 
callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: true, + } + + result := invokeDeleteStrategy(funcs, deployment, tt.config, true) + + // Should return a valid result + assert.NotNil(t, result) + }) + } +} + +func TestRemovePodAnnotations(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + volumes []v1.Volume + config common.Config + }{ + { + name: "Remove pod annotations - configmap", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + volumes: []v1.Volume{}, + config: common.Config{ + ResourceName: "my-configmap", + Type: constants.ConfigmapEnvVarPostfix, + SHAValue: "sha-value", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes) + + funcs := callbacks.RollingUpgradeFuncs{ + ContainersFunc: mockContainersFunc, + InitContainersFunc: mockInitContainersFunc, + VolumesFunc: mockVolumesFunc, + PodAnnotationsFunc: mockPodAnnotationsFunc, + PatchTemplatesFunc: mockPatchTemplatesFunc, + SupportsPatch: false, // No patch for annotations removal test + } + + result := removePodAnnotations(funcs, deployment, tt.config, true) + + // Should return Updated since it sets the SHA to empty data hash + assert.Equal(t, constants.Updated, result.Result) + }) + } +} diff --git a/internal/pkg/handler/handlers_test.go b/internal/pkg/handler/handlers_test.go new file mode 100644 index 0000000..e5391fb --- /dev/null +++ b/internal/pkg/handler/handlers_test.go @@ -0,0 +1,288 @@ +package handler + +import ( + "testing" + + 
"github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Helper function to create a test ConfigMap +func createTestConfigMap(name, namespace string, data map[string]string) *v1.ConfigMap { + return &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } +} + +// Helper function to create a test Secret +func createTestSecret(name, namespace string, data map[string][]byte) *v1.Secret { + return &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } +} + +// Helper function to create test metrics collectors +func createTestCollectors() metrics.Collectors { + return metrics.NewCollectors() +} + +// ============================================================ +// ResourceCreatedHandler Tests +// ============================================================ + +func TestResourceCreatedHandler_GetConfig_ConfigMap(t *testing.T) { + cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "value"}) + handler := ResourceCreatedHandler{ + Resource: cm, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) // oldSHA is always empty for create handler +} + +func TestResourceCreatedHandler_GetConfig_Secret(t *testing.T) { + secret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("value")}) + handler := ResourceCreatedHandler{ + Resource: secret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", 
config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceCreatedHandler_GetConfig_InvalidResource(t *testing.T) { + // Test with an invalid resource type + handler := ResourceCreatedHandler{ + Resource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + // Config should be empty/zero for invalid resources + assert.Empty(t, config.ResourceName) +} + +func TestResourceCreatedHandler_Handle_NilResource(t *testing.T) { + handler := ResourceCreatedHandler{ + Resource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + // Should not return error even with nil resource (just logs error) + assert.NoError(t, err) +} + +// ============================================================ +// ResourceDeleteHandler Tests +// ============================================================ + +func TestResourceDeleteHandler_GetConfig_ConfigMap(t *testing.T) { + cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "value"}) + handler := ResourceDeleteHandler{ + Resource: cm, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.Empty(t, oldSHA) +} + +func TestResourceDeleteHandler_GetConfig_Secret(t *testing.T) { + secret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("value")}) + handler := ResourceDeleteHandler{ + Resource: secret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + 
assert.Empty(t, oldSHA) +} + +func TestResourceDeleteHandler_GetConfig_InvalidResource(t *testing.T) { + handler := ResourceDeleteHandler{ + Resource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Empty(t, config.ResourceName) +} + +func TestResourceDeleteHandler_Handle_NilResource(t *testing.T) { + handler := ResourceDeleteHandler{ + Resource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +// ============================================================ +// ResourceUpdatedHandler Tests +// ============================================================ + +func TestResourceUpdatedHandler_GetConfig_ConfigMap(t *testing.T) { + oldCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "old-value"}) + newCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "new-value"}) + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.NotEmpty(t, oldSHA) + // SHAs should be different since data changed + assert.NotEqual(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_ConfigMap_SameData(t *testing.T) { + oldCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"}) + newCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"}) + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-cm", config.ResourceName) + // SHAs should be the same since data didn't change + assert.Equal(t, config.SHAValue, oldSHA) 
+} + +func TestResourceUpdatedHandler_GetConfig_Secret(t *testing.T) { + oldSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("old-value")}) + newSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("new-value")}) + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + assert.Equal(t, "default", config.Namespace) + assert.Equal(t, constants.SecretEnvVarPostfix, config.Type) + assert.NotEmpty(t, config.SHAValue) + assert.NotEmpty(t, oldSHA) + assert.NotEqual(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_Secret_SameData(t *testing.T) { + oldSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("same-value")}) + newSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("same-value")}) + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: createTestCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, "test-secret", config.ResourceName) + // SHAs should be the same since data didn't change + assert.Equal(t, config.SHAValue, oldSHA) +} + +func TestResourceUpdatedHandler_GetConfig_InvalidResource(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: "invalid", + OldResource: "invalid", + Collectors: createTestCollectors(), + } + + config, _ := handler.GetConfig() + + assert.Empty(t, config.ResourceName) +} + +func TestResourceUpdatedHandler_Handle_NilResource(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: nil, + OldResource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} + +func TestResourceUpdatedHandler_Handle_NilOldResource(t *testing.T) { + cm := createTestConfigMap("test-cm", 
"default", map[string]string{"key": "value"}) + handler := ResourceUpdatedHandler{ + Resource: cm, + OldResource: nil, + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + // Should not return error (just logs error) + assert.NoError(t, err) +} + +func TestResourceUpdatedHandler_Handle_NoChange(t *testing.T) { + // When SHA values are the same, Handle should return nil without doing anything + cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"}) + handler := ResourceUpdatedHandler{ + Resource: cm, + OldResource: cm, // Same resource = same SHA + Collectors: createTestCollectors(), + } + + err := handler.Handle() + + assert.NoError(t, err) +} diff --git a/internal/pkg/handler/update_test.go b/internal/pkg/handler/update_test.go new file mode 100644 index 0000000..dcc1925 --- /dev/null +++ b/internal/pkg/handler/update_test.go @@ -0,0 +1,530 @@ +package handler + +import ( + "testing" + + "github.com/stakater/Reloader/internal/pkg/constants" + "github.com/stakater/Reloader/internal/pkg/metrics" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestResourceUpdatedHandler_GetConfig(t *testing.T) { + tests := []struct { + name string + oldResource any + newResource any + expectedName string + expectedNS string + expectedType string + expectSHANotEmpty bool + expectSHAChanged bool + }{ + { + name: "ConfigMap data changed", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "old-value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "new-value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap data unchanged", + oldResource: 
&v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "same-value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key": "same-value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "ConfigMap key added", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1", "key2": "value2"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap key removed", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1", "key2": "value2"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"}, + Data: map[string]string{"key1": "value1"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "ConfigMap only labels changed - SHA unchanged", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Labels: map[string]string{"version": "v1"}, + }, + Data: map[string]string{"key": "value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Labels: map[string]string{"version": "v2"}, + }, + Data: map[string]string{"key": "value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + 
expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, // Only data affects SHA, not labels + }, + { + name: "ConfigMap only annotations changed - SHA unchanged", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Annotations: map[string]string{"note": "old"}, + }, + Data: map[string]string{"key": "value"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cm", + Namespace: "default", + Annotations: map[string]string{"note": "new"}, + }, + Data: map[string]string{"key": "value"}, + }, + expectedName: "my-cm", + expectedNS: "default", + expectedType: constants.ConfigmapEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, // Only data affects SHA, not annotations + }, + { + name: "Secret data changed", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("old-pass")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("new-pass")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "Secret data unchanged", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("same-pass")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"password": []byte("same-pass")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "Secret key added", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: 
"default"}, + Data: map[string][]byte{"key1": []byte("value1")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"}, + Data: map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: true, + }, + { + name: "Secret only labels changed - SHA unchanged", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + Labels: map[string]string{"env": "dev"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + Labels: map[string]string{"env": "prod"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + }, + expectedName: "my-secret", + expectedNS: "default", + expectedType: constants.SecretEnvVarPostfix, + expectSHANotEmpty: true, + expectSHAChanged: false, + }, + { + name: "Invalid resource type", + oldResource: "invalid", + newResource: "invalid", + expectedName: "", + expectedNS: "", + expectedType: "", + expectSHANotEmpty: false, + expectSHAChanged: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: tt.newResource, + OldResource: tt.oldResource, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.Equal(t, tt.expectedName, config.ResourceName) + assert.Equal(t, tt.expectedNS, config.Namespace) + assert.Equal(t, tt.expectedType, config.Type) + + if tt.expectSHANotEmpty { + assert.NotEmpty(t, config.SHAValue, "new SHA should not be empty") + assert.NotEmpty(t, oldSHA, "old SHA should not be empty") + } + + if tt.expectSHAChanged { + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should have changed") + } else if tt.expectSHANotEmpty { + assert.Equal(t, 
config.SHAValue, oldSHA, "SHA should not have changed") + } + }) + } +} + +func TestResourceUpdatedHandler_Handle(t *testing.T) { + tests := []struct { + name string + oldResource any + newResource any + expectError bool + }{ + { + name: "Both resources nil", + oldResource: nil, + newResource: nil, + expectError: false, // logs error but returns nil + }, + { + name: "Old resource nil", + oldResource: nil, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + }, + expectError: false, + }, + { + name: "New resource nil", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + }, + newResource: nil, + expectError: false, + }, + { + name: "ConfigMap unchanged - no action", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "same"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "same"}, + }, + expectError: false, + }, + { + name: "ConfigMap changed - triggers update", + oldResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "old"}, + }, + newResource: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "new"}, + }, + expectError: false, // No error, but no workloads to update in test + }, + { + name: "Secret unchanged - no action", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("same")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("same")}, + }, + expectError: false, + }, + { + name: "Secret changed - triggers 
update", + oldResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("old")}, + }, + newResource: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"}, + Data: map[string][]byte{"key": []byte("new")}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := ResourceUpdatedHandler{ + Resource: tt.newResource, + OldResource: tt.oldResource, + Collectors: metrics.NewCollectors(), + } + + err := handler.Handle() + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResourceUpdatedHandler_GetConfig_Annotations(t *testing.T) { + // Test that annotations from the new resource are captured + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: "default", + Annotations: map[string]string{ + "old-annotation": "old-value", + }, + }, + Data: map[string]string{"key": "value"}, + } + + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + Namespace: "default", + Annotations: map[string]string{ + "new-annotation": "new-value", + }, + }, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + // Should have new annotations + assert.Equal(t, "new-value", config.ResourceAnnotations["new-annotation"]) + // Should not have old annotations + _, hasOld := config.ResourceAnnotations["old-annotation"] + assert.False(t, hasOld) +} + +func TestResourceUpdatedHandler_GetConfig_Labels(t *testing.T) { + // Test that labels from the new resource are captured + oldSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + Labels: map[string]string{"version": "v1"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + 
newSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + Labels: map[string]string{"version": "v2"}, + }, + Data: map[string][]byte{"key": []byte("value")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newSecret, + OldResource: oldSecret, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + // Should have new labels + assert.Equal(t, "v2", config.Labels["version"]) +} + +func TestResourceUpdatedHandler_EmptyToNonEmpty(t *testing.T) { + // Test transition from empty data to non-empty data + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is added") +} + +func TestResourceUpdatedHandler_NonEmptyToEmpty(t *testing.T) { + // Test transition from non-empty data to empty data + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"key": "value"}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is removed") +} + +func TestResourceUpdatedHandler_BinaryDataChange(t *testing.T) { + // Test ConfigMap binary data change + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + BinaryData: map[string][]byte{"binary": 
[]byte("old-binary")}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + BinaryData: map[string][]byte{"binary": []byte("new-binary")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes") +} + +func TestResourceUpdatedHandler_MixedDataAndBinaryData(t *testing.T) { + // Test ConfigMap with both Data and BinaryData + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"text": "value"}, + BinaryData: map[string][]byte{"binary": []byte("binary-value")}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"}, + Data: map[string]string{"text": "value"}, + BinaryData: map[string][]byte{"binary": []byte("new-binary-value")}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, oldSHA := handler.GetConfig() + + assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes") +} + +func TestResourceUpdatedHandler_DifferentNamespaces(t *testing.T) { + // Edge case: what if namespaces are different (shouldn't happen in practice) + oldCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns1"}, + Data: map[string]string{"key": "value"}, + } + newCM := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns2"}, + Data: map[string]string{"key": "value"}, + } + + handler := ResourceUpdatedHandler{ + Resource: newCM, + OldResource: oldCM, + Collectors: metrics.NewCollectors(), + } + + config, _ := handler.GetConfig() + + // Should use new resource's namespace + assert.Equal(t, "ns2", config.Namespace) +} diff --git a/internal/pkg/handler/upgrade_test.go 
b/internal/pkg/handler/upgrade_test.go index 9a0e945..a7d20c1 100644 --- a/internal/pkg/handler/upgrade_test.go +++ b/internal/pkg/handler/upgrade_test.go @@ -1,4287 +1,671 @@ package handler import ( - "context" - "fmt" - "os" "testing" - "time" - argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/prometheus/client_golang/prometheus" - promtestutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/sirupsen/logrus" "github.com/stakater/Reloader/internal/pkg/callbacks" "github.com/stakater/Reloader/internal/pkg/constants" - "github.com/stakater/Reloader/internal/pkg/metrics" - "github.com/stakater/Reloader/internal/pkg/options" - "github.com/stakater/Reloader/internal/pkg/testutil" - "github.com/stakater/Reloader/internal/pkg/util" "github.com/stakater/Reloader/pkg/common" - "github.com/stakater/Reloader/pkg/kube" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - patchtypes "k8s.io/apimachinery/pkg/types" - testclient "k8s.io/client-go/kubernetes/fake" ) -var ( - clients = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()} - - arsNamespace = "test-handler-" + testutil.RandSeq(5) - arsConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - arsSecretName = "testsecret-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapName = "testprojectedconfigmap-handler-" + testutil.RandSeq(5) - arsProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - arsConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - arsSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - arsProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - arsProjectedSecretWithInitContainer = 
"testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - arsConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - arsSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - arsConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - arsSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - arsSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - arsConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - arsConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - arsConfigMapWithNonAnnotatedDeployment = "testconfigmapNonAnnotatedDeployment-handler-" + testutil.RandSeq(5) - arsSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - arsSecretWithExcludeSecretAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithExcludeConfigMapAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - arsConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - arsConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) - - ersNamespace = "test-handler-" + testutil.RandSeq(5) - ersConfigmapName = "testconfigmap-handler-" + testutil.RandSeq(5) - ersSecretName = "testsecret-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapName = "testprojectedconfigmap-handler-" + 
testutil.RandSeq(5) - ersProjectedSecretName = "testprojectedsecret-handler-" + testutil.RandSeq(5) - ersConfigmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5) - ersSecretWithInitContainer = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5) - ersProjectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5) - ersProjectedSecretWithInitContainer = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5) - ersConfigmapWithInitEnv = "configmapWithInitEnv-" + testutil.RandSeq(5) - ersSecretWithInitEnv = "secretWithInitEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvName = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5) - ersConfigmapWithEnvFromName = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5) - ersSecretWithEnvName = "testsecretWithEnv-handler-" + testutil.RandSeq(5) - ersSecretWithEnvFromName = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5) - ersConfigmapWithPodAnnotations = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapWithBothAnnotations = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5) - ersConfigmapAnnotated = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5) - ersSecretWithSecretAutoAnnotation = "testsecretwithsecretautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapAutoAnnotation = "testconfigmapwithconfigmapautoannotationdeployment-handler-" + testutil.RandSeq(5) - ersSecretWithSecretExcludeAnnotation = "testsecretwithsecretexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithConfigMapExcludeAnnotation = "testconfigmapwithconfigmapexcludeannotationdeployment-handler-" + testutil.RandSeq(5) - ersConfigmapWithIgnoreAnnotation = "testconfigmapWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - ersSecretWithIgnoreAnnotation = "testsecretWithIgnoreAnnotation-handler-" + testutil.RandSeq(5) - 
ersConfigmapWithPausedDeployment = "testconfigmapWithPausedDeployment-handler-" + testutil.RandSeq(5) -) - -func TestMain(m *testing.M) { - - // Creating namespaces - testutil.CreateNamespace(arsNamespace, clients.KubernetesClient) - testutil.CreateNamespace(ersNamespace, clients.KubernetesClient) - - logrus.Infof("Setting up the annotation reload strategy test resources") - setupArs() - logrus.Infof("Setting up the env-var reload strategy test resources") - setupErs() - - logrus.Infof("Running Testcases") - retCode := m.Run() - - logrus.Infof("tearing down the annotation reload strategy test resources") - teardownArs() - logrus.Infof("tearing down the env-var reload strategy test resources") - teardownErs() - - os.Exit(retCode) -} - -func setupArs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", 
err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations, 
"www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - // Patch with ignore annotation - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(arsNamespace) - patch := 
[]byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), arsConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, arsNamespace, arsSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(arsNamespace) - _, _ = secretClient.Patch(context.TODO(), arsSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithIgnoreAnnotation, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with 
configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedConfigMapWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsProjectedSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitContainer, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsConfigmapWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, arsSecretWithInitEnv, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret 
creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsConfigmapWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, arsSecretWithEnvFromName, arsNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated, - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with configmap and without annotations - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(clients.KubernetesClient, arsConfigMapWithNonAnnotatedDeployment, arsNamespace, map[string]string{}) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and without annotation creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err 
= testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsSecretWithSecretAutoAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, arsConfigmapWithConfigMapAutoAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and exclude secret annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsSecretWithExcludeSecretAnnotation, arsNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and exclude configmap annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, arsConfigmapWithExcludeConfigMapAnnotation, arsNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { 
- logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedConfigMapName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsProjectedSecretName, arsNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = 
testutil.CreateStatefulSet(clients.KubernetesClient, arsConfigmapWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, arsSecretWithEnvName, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithPodAnnotations, arsNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, arsConfigmapWithBothAnnotations, arsNamespace, true) - - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, arsConfigmapWithPausedDeployment, arsNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } -} - -func teardownArs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected 
volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if deploymentError != nil { - 
logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", 
deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and exclude secret annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and exclude configmap annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := 
testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret - statefulSetError = 
testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment with pause annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, 
arsSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitContainer) - if err != nil { - 
logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting Secret used with exclude secret annotation - err = 
testutil.DeleteSecret(clients.KubernetesClient, arsNamespace, arsSecretWithExcludeSecretAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotations: %v", err) - } - - // Deleting ConfigMap used with exclude configmap annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotations: %v", err) - } - - // Deleting configmap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, arsNamespace, arsConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(arsNamespace, clients.KubernetesClient) - -} - -func setupErs() { - // Creating configmap - _, err := testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap will be used in projected volume in init containers - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, 
ersProjectedConfigMapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret will be used in projected volume in init containers - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap for testing pausing deployments - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv, "www.google.com") - if 
err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret auto annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap auto annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating secret used with secret exclude annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - - // Creating configmap used with configmap exclude annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - - // Creating configmap with ignore annotation - _, err = testutil.CreateConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.google.com") - if err != nil { - logrus.Errorf("Error in configmap creation: %v", err) - } - cmClient := clients.KubernetesClient.CoreV1().ConfigMaps(ersNamespace) - patch := []byte(`{"metadata":{"annotations":{"reloader.stakater.com/ignore":"true"}}}`) - _, _ = cmClient.Patch(context.TODO(), 
ersConfigmapWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating secret with ignore annotation - _, err = testutil.CreateSecret(clients.KubernetesClient, ersNamespace, ersSecretWithIgnoreAnnotation, data) - if err != nil { - logrus.Errorf("Error in secret creation: %v", err) - } - secretClient := clients.KubernetesClient.CoreV1().Secrets(ersNamespace) - _, _ = secretClient.Patch(context.TODO(), ersSecretWithIgnoreAnnotation, patchtypes.MergePatchType, patch, metav1.PatchOptions{}) - - // Creating Deployment referencing configmap with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap ignore annotation creation: %v", err) - } - // Creating Deployment referencing secret with ignore annotation - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithIgnoreAnnotation, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret ignore annotation creation: %v", err) - } - - // Creating Deployment with configmap - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with configmap in projected volume mounted in init 
container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedConfigMapWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret in projected volume - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret in projected volume mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersProjectedSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret mounted in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitContainer, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with configmap mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersConfigmapWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating Deployment with secret mounted as Env in init container - _, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, ersSecretWithInitEnv, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with secret creation: %v", err) - } - - // Creating Deployment with env var source as configmap - _, err = 
testutil.CreateDeployment(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap configmap as env var source creation: %v", err) - } - - // Creating Deployment with env var source as secret - _, err = testutil.CreateDeployment(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersConfigmapWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSource(clients.KubernetesClient, ersSecretWithEnvFromName, ersNamespace) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with envFrom source as secret - _, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated, - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, ersSecretWithSecretAutoAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret auto annotation - _, err = testutil.CreateDeploymentWithTypedAutoAnnotation(clients.KubernetesClient, 
ersConfigmapWithConfigMapAutoAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap auto annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersSecretWithSecretExcludeAnnotation, ersNamespace, testutil.SecretResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with secret and with secret exclude annotation: %v", err) - } - - // Creating Deployment with secret and with secret exclude annotation - _, err = testutil.CreateDeploymentWithExcludeAnnotation(clients.KubernetesClient, ersConfigmapWithConfigMapExcludeAnnotation, ersNamespace, testutil.ConfigmapResourceType) - if err != nil { - logrus.Errorf("Error in Deployment with configmap and with configmap exclude annotation: %v", err) - } - - // Creating Deployment with pause annotation - _, err = testutil.CreateDeploymentWithAnnotations(clients.KubernetesClient, ersConfigmapWithPausedDeployment, ersNamespace, map[string]string{options.PauseDeploymentAnnotation: "10s"}, false) - if err != nil { - logrus.Errorf("Error in Deployment with configmap creation: %v", err) - } - - // Creating DaemonSet with configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // Creating DaemonSet with secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with configmap in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap creation: %v", err) - } - - // 
Creating DaemonSet with secret in projected volume - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret creation: %v", err) - } - - // Creating DaemonSet with env var source as configmap - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with configmap as env var source creation: %v", err) - } - - // Creating DaemonSet with env var source as secret - _, err = testutil.CreateDaemonSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in DaemonSet with secret configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret creation: %v", err) - } - - // Creating StatefulSet with configmap in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedConfigMapName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with secret in projected volume - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersProjectedSecretName, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in StatefulSet with configmap creation: %v", err) - } - - // Creating StatefulSet with env var source as configmap - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersConfigmapWithEnvName, ersNamespace, false) - if err != nil { - 
logrus.Errorf("Error in StatefulSet with configmap configmap as env var source creation: %v", err) - } - - // Creating StatefulSet with env var source as secret - _, err = testutil.CreateStatefulSet(clients.KubernetesClient, ersSecretWithEnvName, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err) - } - - // Creating Deployment with pod annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithPodAnnotations, ersNamespace, false) - if err != nil { - logrus.Errorf("Error in Deployment with pod annotations: %v", err) - } - - // Creating Deployment with both annotations - _, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, ersConfigmapWithBothAnnotations, ersNamespace, true) - if err != nil { - logrus.Errorf("Error in Deployment with both annotations: %v", err) - } -} - -func teardownErs() { - // Deleting Deployment with configmap - deploymentError := testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if 
deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with secret in projected volume mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Deployment with configmap as env var source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as env var source %v", deploymentError) - } - - // Deleting Deployment with secret - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as env var source %v", deploymentError) - } - - // Deleting Deployment with configmap mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted in init container %v", deploymentError) - } - - // Deleting 
Deployment with configmap mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with secret mounted as env in init container - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret mounted as env in init container %v", deploymentError) - } - - // Deleting Deployment with configmap as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap as envFrom source %v", deploymentError) - } - - // Deleting Deployment with secret as envFrom source - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError) - } - - // Deleting Deployment with pod annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError) - } - - // Deleting Deployment with both annotations - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithBothAnnotations) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError) - } - - // Deleting Deployment with search annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, 
ersConfigmapAnnotated) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret auto annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap auto annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap auto annotation %v", deploymentError) - } - - // Deleting Deployment with secret and secret exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with secret exclude annotation %v", deploymentError) - } - - // Deleting Deployment with configmap and configmap exclude annotation - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap exclude annotation %v", deploymentError) - } - - // Deleting DaemonSet with configmap - daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // 
Deleting DaemonSet with configmap in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError) - } - - // Deleting Deployment with secret in projected volume - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError) - } - - // Deleting Deployment with configmap as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with configmap as env var source %v", daemonSetError) - } - - // Deleting Deployment with secret as env var source - daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if daemonSetError != nil { - logrus.Errorf("Error while deleting daemonSet with secret as env var source %v", daemonSetError) - } - - // Deleting StatefulSet with configmap - statefulSetError := testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - - // Deleting Deployment with secret - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError) - } - 
- // Deleting Deployment with secret in projected volume - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError) - } - - // Deleting StatefulSet with configmap as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with configmap as env var source %v", statefulSetError) - } - - // Deleting Deployment with secret as env var source - statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if statefulSetError != nil { - logrus.Errorf("Error while deleting statefulSet with secret as env var source %v", statefulSetError) - } - - // Deleting Deployment for testing pausing deployments - deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if deploymentError != nil { - logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError) - } - - // Deleting Configmap - err := testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretName) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapName) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting Secret used in projected volume - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretName) - if err != nil { - 
logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting configmap used in projected volume in init containers - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersProjectedConfigMapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap %v", err) - } - - // Deleting secret used in projected volume in init containers - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersProjectedSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvName) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used in init container - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the configmap used in init container %v", err) - } - - // Deleting Secret used in init container - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitContainer) - if err != nil { - logrus.Errorf("Error while deleting the secret used in init container %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithEnvFromName) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithEnvFromName) - if err != 
nil { - logrus.Errorf("Error while deleting the secret used as env var source %v", err) - } - - // Deleting Configmap used as env var source - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the configmap used as env var source in init container %v", err) - } - - // Deleting Secret used as env var source - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithInitEnv) - if err != nil { - logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err) - } - - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPodAnnotations) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err) - } - - // Deleting Secret used with secret auto annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret auto annotation: %v", err) - } - - // Deleting ConfigMap used with configmap auto annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap auto annotation: %v", err) - } - - // Deleting Secret used with secret exclude annotation - err = testutil.DeleteSecret(clients.KubernetesClient, ersNamespace, ersSecretWithSecretExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the secret used with secret exclude annotation: %v", err) - } - - // Deleting ConfigMap used with configmap exclude annotation - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation) - if err != nil { - logrus.Errorf("Error while deleting the configmap used with configmap exclude annotation: %v", 
err) - } - - // Deleting ConfigMap for testing pausing deployments - err = testutil.DeleteConfigMap(clients.KubernetesClient, ersNamespace, ersConfigmapWithPausedDeployment) - if err != nil { - logrus.Errorf("Error while deleting the configmap: %v", err) - } - - // Deleting namespace - testutil.DeleteNamespace(ersNamespace, clients.KubernetesClient) - -} - -func getConfigWithAnnotations(resourceType string, name string, shaData string, annotation string, typedAutoAnnotation string) common.Config { - ns := ersNamespace - if options.ReloadStrategy == constants.AnnotationsReloadStrategy { - ns = arsNamespace - } - - return common.Config{ - Namespace: ns, - ResourceName: name, - SHAValue: shaData, - Annotation: annotation, - TypedAutoAnnotation: typedAutoAnnotation, - Type: resourceType, - } -} - -func getCollectors() metrics.Collectors { - return metrics.NewCollectors() -} - -var labelSucceeded = prometheus.Labels{"success": "true"} -var labelFailed = prometheus.Labels{"success": "false"} - -func testRollingUpgradeInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - config.SHAValue = testutil.GetSHAfromEmptyData() - removed := testutil.VerifyResourceAnnotationUpdate(clients, config, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix 
string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, namespace, name) - } - deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().AnnotationTemplate) - - itemCalled := 0 - itemsCalled := 0 - - deploymentFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDeploymentItem(client, namespace, name) - } - deploymentFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDeploymentItems(client, namespace) - } - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - 
assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"3c9a892aeaedc759abc3df9884a37b8be5680382\"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationAndWithoutAutoReloadAllNoTriggersUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying 
deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapWithoutReloadAnnotationButWithAutoReloadAllUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - options.AutoReloadAll = true - defer func() { options.AutoReloadAll = false }() - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigMapWithNonAnnotatedDeployment, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigMapWithNonAnnotatedDeployment, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, 
config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations 
= map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - 
t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - arsConfigmapAnnotated+"-different", - arsNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(arsNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was 
updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) > 0 { - t.Errorf("Counter by namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - 
envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, 
invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if 
promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := 
testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - 
t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": 
arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, 
arsSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - 
logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - 
testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithExcludeSecretAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithExcludeSecretAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, arsSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - 
updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithExcludeConfigMapAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithExcludeConfigMapAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithExcludeConfigMapAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying deployment did update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := 
getConfigWithAnnotations(envVarPostfix, arsConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return 
callbacks.GetDaemonSetItems(client, namespace) - } - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - daemonSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return callbacks.GetDaemonSetItem(client, namespace, name) - } - daemonSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetDaemonSetItems(client, namespace) - } - - assert.True(t, 
daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"314a2269170750a974d79f02b5b9ee517de7f280\"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, 
options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := 
constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, 
error) { - itemCalled++ - return callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - assert.Equal(t, 0, itemCalled, "ItemFunc should not be called") - assert.Equal(t, 2, itemsCalled, "ItemsFunc should be called twice") - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - - itemCalled := 0 - itemsCalled := 0 - - statefulSetFuncs.ItemFunc = func(client kube.Clients, namespace string, name string) (runtime.Object, error) { - itemCalled++ - return 
callbacks.GetStatefulSetItem(client, namespace, name) - } - statefulSetFuncs.ItemsFunc = func(client kube.Clients, namespace string) []runtime.Object { - itemsCalled++ - return callbacks.GetStatefulSetItems(client, namespace) - } - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().AnnotationTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"metadata":{"annotations":{"reloader.stakater.com/last-reloaded-from":`) - assert.Contains(t, string(bytes), `\"hash\":\"f821414d40d8815fb330763f74a4ff7ab651d4fa\"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 1, itemCalled, "ItemFunc should be called once") - assert.Equal(t, 1, itemsCalled, "ItemsFunc should be called once") - assert.Equal(t, 2, patchCalled, "PatchFunc should be called twice") - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - 
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade 
failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, arsNamespace, arsProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, arsProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was 
not increased") - } - - testRollingUpgradeInvokeDeleteStrategyArs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == arsConfigmapWithPodAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == arsConfigmapWithBothAnnotations { - annotations := deploymentFuncs.PodAnnotationsFunc(i) - updated := testutil.GetResourceSHAFromAnnotation(annotations) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, arsConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": arsNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestIgnoreAnnotationNoReloadUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, arsNamespace, arsConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, arsConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, 
options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ARS") - } - - // Ensure deployment is NOT updated - updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": arsNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased") - } -} -func TestIgnoreAnnotationNoReloadUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithIgnoreAnnotation, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithIgnoreAnnotation, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/ignore": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap and ignore annotation using ERS") - } - - // Ensure deployment is NOT updated - updated := 
testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated but should not have been (ERS)") - } - - // Ensure counters remain zero - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 0 { - t.Errorf("Reload counter should not have increased (ERS)") - } - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 0 { - t.Errorf("Reload counter by namespace should not have increased (ERS)") - } -} - -func testRollingUpgradeInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } - - removed := testutil.VerifyResourceEnvVarRemoved(clients, config, envVarPostfix, upgradeFuncs) - if !removed { - t.Errorf("%s was not updated", upgradeFuncs.ResourceType) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } -} - -func testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t *testing.T, clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, envVarPostfix string) { - assert.NotEmpty(t, upgradeFuncs.PatchTemplatesFunc().DeleteEnvVarTemplate) - - err := PerformAction(clients, config, upgradeFuncs, collectors, nil, invokeDeleteStrategy) - upgradeFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - assert.Equal(t, patchtypes.JSONPatchType, patchType) - assert.NotEmpty(t, bytes) - return nil - } - upgradeFuncs.UpdateFunc = 
func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", upgradeFuncs.ResourceType, envVarPostfix) - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, 
"www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - - assert.True(t, deploymentFuncs.SupportsPatch) - assert.NotEmpty(t, deploymentFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - deploymentFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"3c9a892aeaedc759abc3df9884a37b8be5680382"`) - return nil - } - - deploymentFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - assert.Equal(t, 2, patchCalled) - - deploymentFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, 
ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := 
testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggersUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - time.Sleep(5 * time.Second) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by namespace 
was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMappedUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations( - clients.KubernetesClient, - ersConfigmapAnnotated+"-different", - ersNamespace, - map[string]string{"reloader.stakater.com/search": "true"}, - ) - if err != nil { - t.Errorf("Failed to create deployment with search annotation.") - } - defer func() { - _ = clients.KubernetesClient.AppsV1().Deployments(ersNamespace).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{}) - }() - // defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{}) - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapAnnotated, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapAnnotated, shaData, "", options.ConfigmapReloaderAutoAnnotation) - config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"} - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment was updated unexpectedly") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 { - t.Errorf("Counter was increased unexpectedly") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) > 0 { - t.Errorf("Counter by 
namespace was increased unexpectedly") - } -} - -func TestRollingUpgradeForDeploymentWithConfigmapInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for %s with %s", deploymentFuncs.ResourceType, envVarPostfix) - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapWithInitContainer, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, 
ersProjectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := 
testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithInitEnv, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, 
deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithEnvFromName, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - 
t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func 
TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvName, shaData, options.ReloaderAutoAnnotation, 
options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFromUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithEnvFromName, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not 
updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainerUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithInitEnv, shaData, options.ReloaderAutoAnnotation, options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithSecretExcludeAnnotationUsingErs(t *testing.T) { - 
options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretExcludeAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretExcludeAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude Secret") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment that had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithSecretAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretWithSecretAutoAnnotation, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy") - config := getConfigWithAnnotations(envVarPostfix, ersSecretWithSecretAutoAnnotation, shaData, "", options.SecretReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with Secret") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if 
promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithConfigMapExcludeAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapExcludeAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapExcludeAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with exclude ConfigMap") - } - - logrus.Infof("Verifying deployment did not update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if updated { - t.Errorf("Deployment which had to be excluded was updated") - } -} - -func TestRollingUpgradeForDeploymentWithConfigMapAutoAnnotationUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithConfigMapAutoAnnotation, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithConfigMapAutoAnnotation, shaData, "", options.ConfigmapReloaderAutoAnnotation) - 
deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with ConfigMap") - } - - logrus.Infof("Verifying deployment update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, deploymentFuncs) - if !updated { - t.Errorf("Deployment was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, deploymentFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) 
!= 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - - assert.True(t, daemonSetFuncs.SupportsPatch) - assert.NotEmpty(t, daemonSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - daemonSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"314a2269170750a974d79f02b5b9ee517de7f280"`) - return nil - } - - daemonSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for 
DaemonSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - daemonSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVarUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, 
ersNamespace, ersConfigmapWithEnvName, "www.facebook.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithEnvName, shaData, options.ReloaderAutoAnnotation, options.ConfigmapReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret") - } - - logrus.Infof("Verifying daemonSet update") - updated := 
testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, daemonSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume") - } - - logrus.Infof("Verifying daemonSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, daemonSetFuncs) - if !updated { - t.Errorf("DaemonSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, daemonSetFuncs, 
collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithPatchAndRetryUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := 
GetStatefulSetRollingUpgradeFuncs() - - assert.True(t, statefulSetFuncs.SupportsPatch) - assert.NotEmpty(t, statefulSetFuncs.PatchTemplatesFunc().EnvVarTemplate) - - patchCalled := 0 - statefulSetFuncs.PatchFunc = func(client kube.Clients, namespace string, resource runtime.Object, patchType patchtypes.PatchType, bytes []byte) error { - patchCalled++ - if patchCalled < 2 { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict}} // simulate conflict - } - assert.Equal(t, patchtypes.StrategicMergePatchType, patchType) - assert.NotEmpty(t, bytes) - assert.Contains(t, string(bytes), `{"spec":{"template":{"spec":{"containers":[{"name":`) - assert.Contains(t, string(bytes), `"value":"f821414d40d8815fb330763f74a4ff7ab651d4fa"`) - return nil - } - - statefulSetFuncs.UpdateFunc = func(kube.Clients, string, runtime.Object) error { - t.Errorf("Update should not be called") - return nil - } - - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap") - } - - assert.Equal(t, 2, patchCalled) - - statefulSetFuncs = GetDeploymentRollingUpgradeFuncs() - testRollingUpgradeWithPatchAndInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersProjectedConfigMapName, "www.twitter.com") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := 
getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - 
} - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolumeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.SecretEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, ersNamespace, ersProjectedSecretName, "d3d3LnR3aXR0ZXIuY29t") - config := getConfigWithAnnotations(envVarPostfix, ersProjectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation, options.SecretReloaderAutoAnnotation) - statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, statefulSetFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume") - } - - logrus.Infof("Verifying statefulSet update") - updated := testutil.VerifyResourceEnvVarUpdate(clients, config, envVarPostfix, statefulSetFuncs) - if !updated { - t.Errorf("StatefulSet was not updated") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - testRollingUpgradeInvokeDeleteStrategyErs(t, clients, config, statefulSetFuncs, collectors, envVarPostfix) -} - -func TestRollingUpgradeForDeploymentWithPodAnnotationsUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := 
testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapWithPodAnnotations, "www.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - err := PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - time.Sleep(5 * time.Second) - if err != nil { - t.Errorf("Rolling upgrade failed for Deployment with pod annotations") - } - - logrus.Infof("Verifying deployment update") - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - var foundPod, foundBoth bool - for _, i := range items { - accessor, err := meta.Accessor(i) - if err != nil { - t.Errorf("Error getting accessor for item: %v", err) - } - name := accessor.GetName() - if name == ersConfigmapWithPodAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated != config.SHAValue { - t.Errorf("Deployment was not updated") - } - foundPod = true - } - if name == ersConfigmapWithBothAnnotations { - containers := deploymentFuncs.ContainersFunc(i) - updated := testutil.GetResourceSHAFromEnvVar(containers, envName) - if updated == config.SHAValue { - t.Errorf("Deployment was updated") - } - foundBoth = true - } - } - if !foundPod { - t.Errorf("Deployment with pod annotations was not found") - } - if !foundBoth { - t.Errorf("Deployment with both annotations was not found") - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": ersNamespace})) != 1 { - 
t.Errorf("Counter by namespace was not increased") - } -} - -func TestFailedRollingUpgradeUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, ersNamespace, ersConfigmapName, "fail.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, ersConfigmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ runtime.Object) error { - return fmt.Errorf("error") - } - deploymentFuncs.PatchFunc = func(kube.Clients, string, runtime.Object, patchtypes.PatchType, []byte) error { - return fmt.Errorf("error") - } - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": ersNamespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } -} - -func TestPausingDeploymentUsingErs(t *testing.T) { - options.ReloadStrategy = constants.EnvVarsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, ersConfigmapWithPausedDeployment, ersNamespace) -} - -func TestPausingDeploymentUsingArs(t *testing.T) { - options.ReloadStrategy = constants.AnnotationsReloadStrategy - testPausingDeployment(t, options.ReloadStrategy, arsConfigmapWithPausedDeployment, arsNamespace) -} - -func testPausingDeployment(t *testing.T, reloadStrategy string, testName string, namespace string) { - options.ReloadStrategy = reloadStrategy - envVarPostfix := constants.ConfigmapEnvVarPostfix - - shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, 
namespace, testName, "pause.stakater.com") - config := getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - deploymentFuncs := GetDeploymentRollingUpgradeFuncs() - collectors := getCollectors() - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - // Wait for deployment to have paused-at annotation - logrus.Infof("Waiting for deployment %s to have paused-at annotation", testName) - err := waitForDeploymentPausedAtAnnotation(clients, deploymentFuncs, config.Namespace, testName, 30*time.Second) - if err != nil { - t.Errorf("Failed to wait for deployment paused-at annotation: %v", err) - } - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 1 { - t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment has been paused") - items := deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err := isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment has not been paused") - } - - shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, testName, "pause-changed.stakater.com") - config = getConfigWithAnnotations(envVarPostfix, testName, shaData, options.ConfigmapUpdateOnChangeAnnotation, options.ConfigmapReloaderAutoAnnotation) - - _ = PerformAction(clients, config, deploymentFuncs, collectors, nil, invokeReloadStrategy) - - if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 2 { - t.Errorf("Counter was not increased") - } - - if promtestutil.ToFloat64(collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": namespace})) != 2 { - 
t.Errorf("Counter by namespace was not increased") - } - - logrus.Infof("Verifying deployment is still paused") - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if !deploymentPaused { - t.Errorf("Deployment should still be paused") - } - - logrus.Infof("Verifying deployment has been resumed after pause interval") - time.Sleep(11 * time.Second) - items = deploymentFuncs.ItemsFunc(clients, config.Namespace) - deploymentPaused, err = isDeploymentPaused(items, testName) - if err != nil { - t.Errorf("%s", err.Error()) - } - if deploymentPaused { - t.Errorf("Deployment should have been resumed after pause interval") - } -} - -func isDeploymentPaused(deployments []runtime.Object, deploymentName string) (bool, error) { - deployment, err := FindDeploymentByName(deployments, deploymentName) - if err != nil { - return false, err - } - return IsPaused(deployment), nil -} - -// waitForDeploymentPausedAtAnnotation waits for a deployment to have the pause-period annotation -func waitForDeploymentPausedAtAnnotation(clients kube.Clients, deploymentFuncs callbacks.RollingUpgradeFuncs, namespace, deploymentName string, timeout time.Duration) error { - start := time.Now() - - for time.Since(start) < timeout { - items := deploymentFuncs.ItemsFunc(clients, namespace) - deployment, err := FindDeploymentByName(items, deploymentName) - if err == nil { - annotations := deployment.GetAnnotations() - if annotations != nil { - if _, exists := annotations[options.PauseDeploymentTimeAnnotation]; exists { - return nil - } - } - } - - time.Sleep(100 * time.Millisecond) - } - - return fmt.Errorf("timeout waiting for deployment %s to have pause-period annotation", deploymentName) -} - -// MockArgoRolloutWithEmptyContainers creates a mock Argo Rollout with no containers -// This simulates the scenario where Argo Rollouts with workloadRef return empty containers -func 
MockArgoRolloutWithEmptyContainers(namespace, name string) *runtime.Object { - rollout := &argorolloutv1alpha1.Rollout{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, +func TestGetRollingUpgradeFuncs(t *testing.T) { + tests := []struct { + name string + getFuncs func() callbacks.RollingUpgradeFuncs + resourceType string + supportsPatch bool + }{ + { + name: "Deployment", + getFuncs: GetDeploymentRollingUpgradeFuncs, + resourceType: "Deployment", + supportsPatch: true, }, - Spec: argorolloutv1alpha1.RolloutSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{}, // Empty containers slice - InitContainers: []v1.Container{}, // Empty init containers slice - Volumes: []v1.Volume{}, // Empty volumes slice + { + name: "CronJob", + getFuncs: GetCronJobCreateJobFuncs, + resourceType: "CronJob", + supportsPatch: false, + }, + { + name: "Job", + getFuncs: GetJobCreateJobFuncs, + resourceType: "Job", + supportsPatch: false, + }, + { + name: "DaemonSet", + getFuncs: GetDaemonSetRollingUpgradeFuncs, + resourceType: "DaemonSet", + supportsPatch: true, + }, + { + name: "StatefulSet", + getFuncs: GetStatefulSetRollingUpgradeFuncs, + resourceType: "StatefulSet", + supportsPatch: true, + }, + { + name: "ArgoRollout", + getFuncs: GetArgoRolloutRollingUpgradeFuncs, + resourceType: "Rollout", + supportsPatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + funcs := tt.getFuncs() + assert.Equal(t, tt.resourceType, funcs.ResourceType) + assert.Equal(t, tt.supportsPatch, funcs.SupportsPatch) + assert.NotNil(t, funcs.ItemFunc) + assert.NotNil(t, funcs.ItemsFunc) + assert.NotNil(t, funcs.AnnotationsFunc) + assert.NotNil(t, funcs.PodAnnotationsFunc) + assert.NotNil(t, funcs.ContainersFunc) + assert.NotNil(t, funcs.InitContainersFunc) + assert.NotNil(t, funcs.UpdateFunc) + assert.NotNil(t, funcs.PatchFunc) + assert.NotNil(t, funcs.PatchTemplatesFunc) + assert.NotNil(t, funcs.VolumesFunc) + }) + } 
+} + +func TestGetVolumeMountName(t *testing.T) { + tests := []struct { + name string + volumes []v1.Volume + mountType string + volumeName string + expected string + }{ + { + name: "ConfigMap volume match", + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, }, }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "my-configmap", + expected: "config-volume", + }, + { + name: "Secret volume match", + volumes: []v1.Volume{ + { + Name: "secret-volume", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + mountType: constants.SecretEnvVarPostfix, + volumeName: "my-secret", + expected: "secret-volume", + }, + { + name: "ConfigMap in projected volume", + volumes: []v1.Volume{ + { + Name: "projected-volume", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "projected-configmap", + }, + }, + }, + }, + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "projected-configmap", + expected: "projected-volume", + }, + { + name: "Secret in projected volume", + volumes: []v1.Volume{ + { + Name: "projected-volume", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "projected-secret", + }, + }, + }, + }, + }, + }, + }, + }, + mountType: constants.SecretEnvVarPostfix, + volumeName: "projected-secret", + expected: "projected-volume", + }, + { + name: "No match - wrong configmap name", + volumes: []v1.Volume{ + { + Name: "config-volume", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + 
LocalObjectReference: v1.LocalObjectReference{ + Name: "other-configmap", + }, + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "my-configmap", + expected: "", + }, + { + name: "No match - wrong type", + volumes: []v1.Volume{ + { + Name: "secret-volume", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + }, + }, + }, + }, + mountType: constants.ConfigmapEnvVarPostfix, // Looking for configmap but volume is secret + volumeName: "my-secret", + expected: "", + }, + { + name: "Empty volumes", + volumes: []v1.Volume{}, + mountType: constants.ConfigmapEnvVarPostfix, + volumeName: "any", + expected: "", }, } - var obj runtime.Object = rollout - return &obj + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getVolumeMountName(tt.volumes, tt.mountType, tt.volumeName) + assert.Equal(t, tt.expected, result) + }) + } } -// TestGetContainerUsingResourceWithArgoRolloutEmptyContainers tests with real Argo Rollout functions -func TestGetContainerUsingResourceWithArgoRolloutEmptyContainers(t *testing.T) { - namespace := "test-namespace" - resourceName := "test-configmap" - - // Use real Argo Rollout functions but mock the containers function - rolloutFuncs := GetArgoRolloutRollingUpgradeFuncs() - originalContainersFunc := rolloutFuncs.ContainersFunc - originalInitContainersFunc := rolloutFuncs.InitContainersFunc - - // Override to return empty containers (simulating workloadRef scenario) - rolloutFuncs.ContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts - } - rolloutFuncs.InitContainersFunc = func(item runtime.Object) []v1.Container { - return []v1.Container{} // Empty like workloadRef rollouts +func TestGetContainerWithVolumeMount(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + volumeMountName string + expectFound bool + expectedName string + }{ + { + name: "Container with 
matching volume mount", + containers: []v1.Container{ + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: true, + expectedName: "app", + }, + { + name: "Multiple containers, second has mount", + containers: []v1.Container{ + { + Name: "init", + VolumeMounts: []v1.VolumeMount{}, + }, + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/config"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: true, + expectedName: "app", + }, + { + name: "No matching volume mount", + containers: []v1.Container{ + { + Name: "app", + VolumeMounts: []v1.VolumeMount{ + {Name: "other-volume", MountPath: "/etc/other"}, + }, + }, + }, + volumeMountName: "config-volume", + expectFound: false, + }, + { + name: "Empty containers", + containers: []v1.Container{}, + volumeMountName: "config-volume", + expectFound: false, + }, } - // Restore original functions after test - defer func() { - rolloutFuncs.ContainersFunc = originalContainersFunc - rolloutFuncs.InitContainersFunc = originalInitContainersFunc - }() - - // Use proper Argo Rollout object instead of Pod - mockRollout := MockArgoRolloutWithEmptyContainers(namespace, "test-rollout") - - config := common.Config{ - Namespace: namespace, - ResourceName: resourceName, - Type: constants.ConfigmapEnvVarPostfix, - SHAValue: "test-sha", - } - - // Test both autoReload scenarios using subtests as suggested by Felix - for _, autoReload := range []bool{true, false} { - t.Run(fmt.Sprintf("autoReload_%t", autoReload), func(t *testing.T) { - // This tests the actual fix in the context of Argo Rollouts - result := getContainerUsingResource(rolloutFuncs, *mockRollout, config, autoReload) - - if result != nil { - t.Errorf("Expected nil when using real Argo Rollout functions with empty containers (workloadRef scenario), got %v", result) + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + result := getContainerWithVolumeMount(tt.containers, tt.volumeMountName) + if tt.expectFound { + assert.NotNil(t, result) + assert.Equal(t, tt.expectedName, result.Name) + } else { + assert.Nil(t, result) + } + }) + } +} + +func TestGetContainerWithEnvReference(t *testing.T) { + tests := []struct { + name string + containers []v1.Container + resourceName string + resourceType string + expectFound bool + expectedName string + }{ + { + name: "Container with ConfigMapKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "CONFIG_VALUE", + ValueFrom: &v1.EnvVarSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with SecretKeyRef", + containers: []v1.Container{ + { + Name: "app", + Env: []v1.EnvVar{ + { + Name: "SECRET_VALUE", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + Key: "key", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + resourceType: constants.SecretEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with ConfigMapRef (envFrom)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + }, + }, + }, + }, + resourceName: "my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "Container with SecretRef (envFrom)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: 
v1.LocalObjectReference{ + Name: "my-secret", + }, + }, + }, + }, + }, + }, + resourceName: "my-secret", + resourceType: constants.SecretEnvVarPostfix, + expectFound: true, + expectedName: "app", + }, + { + name: "No match - wrong resource name", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "other-configmap", + }, + }, + }, + }, + }, + }, + resourceName: "my-configmap", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: false, + }, + { + name: "No match - wrong type (looking for secret but has configmap)", + containers: []v1.Container{ + { + Name: "app", + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-resource", + }, + }, + }, + }, + }, + }, + resourceName: "my-resource", + resourceType: constants.SecretEnvVarPostfix, + expectFound: false, + }, + { + name: "Empty containers", + containers: []v1.Container{}, + resourceName: "any", + resourceType: constants.ConfigmapEnvVarPostfix, + expectFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getContainerWithEnvReference(tt.containers, tt.resourceName, tt.resourceType) + if tt.expectFound { + assert.NotNil(t, result) + assert.Equal(t, tt.expectedName, result.Name) + } else { + assert.Nil(t, result) + } + }) + } +} + +func TestGetEnvVarName(t *testing.T) { + tests := []struct { + name string + resourceName string + typeName string + expected string + }{ + { + name: "ConfigMap with simple name", + resourceName: "my-config", + typeName: constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_CONFIG_CONFIGMAP", + }, + { + name: "Secret with simple name", + resourceName: "my-secret", + typeName: constants.SecretEnvVarPostfix, + expected: "STAKATER_MY_SECRET_SECRET", + }, + { + name: "Name with hyphens", + resourceName: "my-app-config", + 
typeName: constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_APP_CONFIG_CONFIGMAP", + }, + { + name: "Name with dots", + resourceName: "my.app.config", + typeName: constants.ConfigmapEnvVarPostfix, + expected: "STAKATER_MY_APP_CONFIG_CONFIGMAP", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getEnvVarName(tt.resourceName, tt.typeName) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestUpdateEnvVar(t *testing.T) { + tests := []struct { + name string + container *v1.Container + envVar string + shaData string + expected constants.Result + newValue string // expected value after update + }{ + { + name: "Update existing env var with different value", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "STAKATER_CONFIG_CONFIGMAP", Value: "old-sha"}, + }, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.Updated, + newValue: "new-sha", + }, + { + name: "No update when value is same", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "STAKATER_CONFIG_CONFIGMAP", Value: "same-sha"}, + }, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "same-sha", + expected: constants.NotUpdated, + newValue: "same-sha", + }, + { + name: "Env var not found", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{ + {Name: "OTHER_VAR", Value: "value"}, + }, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.NoEnvVarFound, + }, + { + name: "Empty env list", + container: &v1.Container{ + Name: "app", + Env: []v1.EnvVar{}, + }, + envVar: "STAKATER_CONFIG_CONFIGMAP", + shaData: "new-sha", + expected: constants.NoEnvVarFound, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := updateEnvVar(tt.container, tt.envVar, tt.shaData) + assert.Equal(t, tt.expected, result) + + if tt.expected == constants.Updated || tt.expected == constants.NotUpdated { + // Verify the value in 
the container + for _, env := range tt.container.Env { + if env.Name == tt.envVar { + assert.Equal(t, tt.newValue, env.Value) + break + } + } + } + }) + } +} + +func TestGetReloaderAnnotationKey(t *testing.T) { + result := getReloaderAnnotationKey() + expected := "reloader.stakater.com/last-reloaded-from" + assert.Equal(t, expected, result) +} + +func TestJsonEscape(t *testing.T) { + tests := []struct { + name string + input string + expected string + hasError bool + }{ + { + name: "Simple string", + input: "hello", + expected: "hello", + hasError: false, + }, + { + name: "String with quotes", + input: `say "hello"`, + expected: `say \"hello\"`, + hasError: false, + }, + { + name: "String with backslash", + input: `path\to\file`, + expected: `path\\to\\file`, + hasError: false, + }, + { + name: "String with newline", + input: "line1\nline2", + expected: `line1\nline2`, + hasError: false, + }, + { + name: "JSON-like string", + input: `{"key":"value"}`, + expected: `{\"key\":\"value\"}`, + hasError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := jsonEscape(tt.input) + if tt.hasError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }) + } +} + +func TestCreateReloadedAnnotations(t *testing.T) { + tests := []struct { + name string + target *common.ReloadSource + hasError bool + }{ + { + name: "Nil target", + target: nil, + hasError: true, + }, + { + name: "Valid target", + target: &common.ReloadSource{ + Name: "my-configmap", + Type: "CONFIGMAP", + }, + hasError: false, + }, + } + + // Use a simple func that doesn't require patch templates + funcs := callbacks.RollingUpgradeFuncs{ + SupportsPatch: false, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + annotations, _, err := createReloadedAnnotations(tt.target, funcs) + if tt.hasError { + assert.Error(t, err) + assert.Nil(t, annotations) + } else { + assert.NoError(t, err) + 
assert.NotNil(t, annotations) + // Verify annotation key exists + _, exists := annotations[getReloaderAnnotationKey()] + assert.True(t, exists) } }) } diff --git a/internal/pkg/testutil/kube.go b/internal/pkg/testutil/kube.go index 1ad43e1..1bf441c 100644 --- a/internal/pkg/testutil/kube.go +++ b/internal/pkg/testutil/kube.go @@ -2,8 +2,6 @@ package testutil import ( "context" - "encoding/json" - "fmt" "math/rand" "sort" "strconv" @@ -12,13 +10,10 @@ import ( argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned" - openshiftv1 "github.com/openshift/api/apps/v1" - appsclient "github.com/openshift/client-go/apps/clientset/versioned" "github.com/sirupsen/logrus" "github.com/stakater/Reloader/internal/pkg/callbacks" "github.com/stakater/Reloader/internal/pkg/constants" "github.com/stakater/Reloader/internal/pkg/crypto" - "github.com/stakater/Reloader/internal/pkg/metrics" "github.com/stakater/Reloader/internal/pkg/options" "github.com/stakater/Reloader/internal/pkg/util" "github.com/stakater/Reloader/pkg/common" @@ -36,8 +31,6 @@ var ( letters = []rune("abcdefghijklmnopqrstuvwxyz") // ConfigmapResourceType is a resource type which controller watches for changes ConfigmapResourceType = "configMaps" - // SecretResourceType is a resource type which controller watches for changes - SecretResourceType = "secrets" ) var ( @@ -45,11 +38,6 @@ var ( Pod = "test-reloader-" + RandSeq(5) Namespace = "test-reloader-" + RandSeq(5) ConfigmapNamePrefix = "testconfigmap-reloader" - SecretNamePrefix = "testsecret-reloader" - Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI=" - UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy" - Collectors = metrics.NewCollectors() SleepDuration = 3 * time.Second ) @@ -105,25 +93,6 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm return 
annotations } -func getEnvVarSources(name string) []v1.EnvFromSource { - return []v1.EnvFromSource{ - { - ConfigMapRef: &v1.ConfigMapEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - { - SecretRef: &v1.SecretEnvSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: name, - }, - }, - }, - } -} - func getVolumes(name string) []v1.Volume { return []v1.Volume{ { @@ -244,23 +213,6 @@ func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec { } } -func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - EnvFrom: getEnvVarSources(name), - }, - }, - }, - } -} - func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec { return v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -285,65 +237,6 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec { } } -func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Image: "busybox", - Name: "busyBox", - VolumeMounts: getVolumeMounts(), - }, - }, - Containers: []v1.Container{ - { - Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - Volumes: getVolumes(name), - }, - } -} - -func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec { - return v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"secondLabel": "temp"}, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Image: "busybox", - Name: "busyBox", - EnvFrom: getEnvVarSources(name), - }, - }, - Containers: []v1.Container{ - { - 
Image: "tutum/hello-world", - Name: name, - Env: []v1.EnvVar{ - { - Name: "BUCKET_NAME", - Value: "test", - }, - }, - }, - }, - }, - } -} - // GetDeployment provides deployment for testing func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) @@ -362,58 +255,6 @@ func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment { } } -// GetDeploymentConfig provides deployment for testing -func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithVolume, - }, - } -} - -// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts -func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainer(deploymentName), - }, - } -} - -// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource -func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, 
deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName), - }, - } -} - func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment { replicaset := int32(1) return &appsv1.Deployment{ @@ -431,117 +272,6 @@ func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.D } } -func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig { - replicaset := int32(1) - podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName) - return &openshiftv1.DeploymentConfig{ - ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}), - Spec: openshiftv1.DeploymentConfigSpec{ - Replicas: replicaset, - Strategy: openshiftv1.DeploymentStrategy{ - Type: openshiftv1.DeploymentStrategyTypeRolling, - }, - Template: &podTemplateSpecWithEnvVars, - }, - } -} - -func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment { - replicaset := int32(1) - return &appsv1.Deployment{ - ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } -} - -func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment { - replicaset := int32(1) - deployment := &appsv1.Deployment{ - ObjectMeta: 
getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}), - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithEnvVarSources(deploymentName), - }, - } - if !both { - deployment.Annotations = nil - } - deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{}) - return deployment -} - -func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - var objectMeta metav1.ObjectMeta - switch resourceType { - case SecretResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{}) - case ConfigmapResourceType: - objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{}) - } - - return &appsv1.Deployment{ - ObjectMeta: objectMeta, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - -func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment { - replicaset := int32(1) - - annotation := map[string]string{} - - switch resourceType { - case SecretResourceType: - annotation[options.SecretExcludeReloaderAnnotation] = deploymentName - case ConfigmapResourceType: - annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName - } - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, - Namespace: namespace, - Labels: 
map[string]string{"firstLabel": "temp"}, - Annotations: annotation, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"secondLabel": "temp"}, - }, - Replicas: &replicaset, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - }, - Template: getPodTemplateSpecWithVolumes(deploymentName), - }, - } -} - // GetDaemonSet provides daemonset for testing func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet { return &appsv1.DaemonSet{ @@ -629,18 +359,6 @@ func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLa } } -// GetSecret provides secret for testing -func GetSecret(namespace string, secretName string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": "temp"}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob { return &batchv1.CronJob{ ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}), @@ -699,18 +417,6 @@ func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job { } } -// GetSecretWithUpdatedLabel provides secret for testing -func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret { - return &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: map[string]string{"firstLabel": label}, - }, - Data: map[string][]byte{"test.url": []byte(data)}, - } -} - // GetResourceSHAFromEnvVar returns the SHA value of given environment variable func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string { for i := range containers { @@ -724,38 +430,10 @@ func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string { return "" } -// GetResourceSHAFromAnnotation 
returns the SHA value of given environment variable -func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string { - lastReloadedResourceName := fmt.Sprintf("%s/%s", - constants.ReloaderAnnotationPrefix, - constants.LastReloadedFromAnnotation, - ) - - annotationJson, ok := podAnnotations[lastReloadedResourceName] - if !ok { - return "" - } - - var last common.ReloadSource - bytes := []byte(annotationJson) - err := json.Unmarshal(bytes, &last) - if err != nil { - return "" - } - - return last.Hash -} - -// ConvertResourceToSHA generates SHA from secret or configmap data +// ConvertResourceToSHA generates SHA from configmap data func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string { values := []string{} - switch resourceType { - case SecretResourceType: - secret := GetSecret(namespace, resourceName, data) - for k, v := range secret.Data { - values = append(values, k+"="+string(v[:])) - } - case ConfigmapResourceType: + if resourceType == ConfigmapResourceType { configmap := GetConfigmap(namespace, resourceName, data) for k, v := range configmap.Data { values = append(values, k+"="+v) @@ -774,15 +452,6 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam return configmapClient, err } -// CreateSecret creates a secret in given namespace and returns the SecretInterface -func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) { - logrus.Infof("Creating secret") - secretClient := client.CoreV1().Secrets(namespace) - _, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return secretClient, err -} - // CreateDeployment creates a deployment in given namespace and returns the Deployment func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, 
error) { logrus.Infof("Creating Deployment") @@ -798,108 +467,6 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp return deployment, err } -// CreateDeployment creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeployment(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName) - } - - for annotationKey, annotationValue := range additionalAnnotations { - deploymentObj.Annotations[annotationKey] = annotationValue - } - - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig -func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) { - logrus.Infof("Creating DeploymentConfig") - deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace) - var deploymentConfigObj *openshiftv1.DeploymentConfig - if volumeMount { - deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName) - } else { - deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName) - } - deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{}) - time.Sleep(5 * time.Second) - return deploymentConfig, err -} - -// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment -func 
CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - var deploymentObj *appsv1.Deployment - if volumeMount { - deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName) - } else { - deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName) - } - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err - -} - -// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment -func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given -// namespace with given annotations. 
-func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName) - deploymentObj.Annotations = annotations - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithTypedAutoAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithTypedAutoAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithTypedAutoAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - time.Sleep(3 * time.Second) - return deployment, err -} - -// CreateDeploymentWithExcludeAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation -func CreateDeploymentWithExcludeAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) { - logrus.Infof("Creating Deployment") - deploymentClient := client.AppsV1().Deployments(namespace) - deploymentObj := GetDeploymentWithExcludeAnnotation(namespace, deploymentName, resourceType) - deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{}) - return deployment, err -} - // CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet func CreateDaemonSet(client kubernetes.Interface, daemonsetName 
string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) { logrus.Infof("Creating DaemonSet") @@ -968,14 +535,6 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN return deploymentError } -// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any -func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error { - logrus.Infof("Deleting DeploymentConfig") - deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return deploymentConfigError -} - // DeleteDaemonSet creates a daemonset in given namespace and returns the error if any func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error { logrus.Infof("Deleting DaemonSet %s", daemonsetName) @@ -1022,20 +581,6 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin return updateErr } -// UpdateSecret updates a secret in given namespace and returns the error if any -func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error { - logrus.Infof("Updating secret %q.\n", secretName) - var secret *v1.Secret - if label != "" { - secret = GetSecretWithUpdatedLabel(namespace, secretName, label, data) - } else { - secret = GetSecret(namespace, secretName, data) - } - _, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{}) - time.Sleep(3 * time.Second) - return updateErr -} - // DeleteConfigMap deletes a configmap in given namespace and returns the error if any func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error { logrus.Infof("Deleting configmap %q.\n", configmapName) @@ -1044,14 +589,6 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam return err 
} -// DeleteSecret deletes a secret in given namespace and returns the error if any -func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error { - logrus.Infof("Deleting secret %q.\n", secretName) - err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) - time.Sleep(3 * time.Second) - return err -} - // RandSeq generates a random sequence func RandSeq(n int) string { b := make([]rune, n) @@ -1107,100 +644,6 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envV return false } -// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all Envvars SKAKATER_name_CONFIGMAP/SECRET are removed -func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - containers := upgradeFuncs.ContainersFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue) - - matches := false - if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == 
"true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix - value := GetResourceSHAFromEnvVar(containers, envName) - if value == "" { - return true - } - } - } - return false -} - -// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not -func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool { - items := upgradeFuncs.ItemsFunc(clients, config.Namespace) - for _, i := range items { - podAnnotations := upgradeFuncs.PodAnnotationsFunc(i) - accessor, err := meta.Accessor(i) - if err != nil { - return false - } - annotations := accessor.GetAnnotations() - // match statefulsets with the correct annotation - annotationValue := annotations[config.Annotation] - searchAnnotationValue := annotations[options.AutoSearchAnnotation] - reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation] - typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation] - reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue) - typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue) - matches := false - if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll { - matches = true - } else if annotationValue != "" { - values := strings.Split(annotationValue, ",") - for _, value := range values { - value = strings.Trim(value, " ") - if value == config.ResourceName { - matches = true - break - } - } - } else if searchAnnotationValue == "true" { - if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" { - matches = true - } - } - - if matches { - updated := GetResourceSHAFromAnnotation(podAnnotations) - if updated == config.SHAValue { - return true - } - } - } - return 
false -} - func GetSHAfromEmptyData() string { return crypto.GenerateSHA("") } diff --git a/scripts/e2e-cluster-cleanup.sh b/scripts/e2e-cluster-cleanup.sh new file mode 100644 index 0000000..7fb9158 --- /dev/null +++ b/scripts/e2e-cluster-cleanup.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# Cleanup script for e2e test cluster +# Run this after e2e tests complete: ./scripts/e2e-cluster-cleanup.sh +# This removes Argo Rollouts, test namespaces, and cluster-scoped resources. + +set -e + +ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}" +ARGO_ROLLOUTS_NAMESPACE="argo-rollouts" + +echo "=== E2E Cluster Cleanup ===" + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed or not in PATH" + exit 1 +fi + +# Check cluster connectivity +echo "Checking cluster connectivity..." +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi + +# ============================================================ +# Cleanup Reloader Test Resources +# ============================================================ +echo "" +echo "=== Cleaning up Reloader test resources ===" + +# Delete test namespaces (created by test suites) +echo "Deleting test namespaces..." +for ns in $(kubectl get namespaces -o name | grep -E "reloader-" | cut -d/ -f2); do + echo " Deleting namespace: ${ns}" + kubectl delete namespace "${ns}" --ignore-not-found --wait=false +done + +# Delete Reloader cluster-scoped resources +echo "Deleting Reloader cluster-scoped resources..." 
+for cr in $(kubectl get clusterrole -o name 2>/dev/null | grep -E "reloader-" | cut -d/ -f2); do + echo " Deleting ClusterRole: ${cr}" + kubectl delete clusterrole "${cr}" --ignore-not-found +done + +for crb in $(kubectl get clusterrolebinding -o name 2>/dev/null | grep -E "reloader-" | cut -d/ -f2); do + echo " Deleting ClusterRoleBinding: ${crb}" + kubectl delete clusterrolebinding "${crb}" --ignore-not-found +done + +# ============================================================ +# Cleanup Argo Rollouts +# ============================================================ +echo "" +echo "=== Uninstalling Argo Rollouts ===" + +# First, delete the deployment to stop the controller +echo "Stopping Argo Rollouts controller..." +kubectl delete deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --ignore-not-found --timeout=30s 2>/dev/null || true + +# Delete all Rollouts and other CRs in all namespaces to avoid finalizer issues +echo "Deleting Argo Rollouts custom resources..." +ARGO_RESOURCES="rollouts analysisruns analysistemplates experiments" +for res in ${ARGO_RESOURCES}; do + kubectl delete "${res}.argoproj.io" --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true +done + +# Delete using the install manifest +echo "Deleting Argo Rollouts installation..." +ARGO_URL="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml" +kubectl delete -f ${ARGO_URL} --ignore-not-found --timeout=60s 2>/dev/null || true + +# Give resources time to be cleaned up before deleting CRDs +sleep 2 + +# Explicitly delete CRDs (cluster-scoped) +echo "Deleting Argo Rollouts CRDs..." +ARGO_CRDS="rollouts.argoproj.io analysisruns.argoproj.io analysistemplates.argoproj.io clusteranalysistemplates.argoproj.io experiments.argoproj.io" +for crd in ${ARGO_CRDS}; do + kubectl delete crd "${crd}" --ignore-not-found --timeout=30s 2>/dev/null || true +done + +# Delete namespace +echo "Deleting Argo Rollouts namespace..." 
+kubectl delete namespace ${ARGO_ROLLOUTS_NAMESPACE} --ignore-not-found --timeout=30s 2>/dev/null || true + +# Delete cluster-scoped RBAC +echo "Deleting Argo Rollouts cluster RBAC..." +kubectl delete clusterrole argo-rollouts argo-rollouts-aggregate-to-admin argo-rollouts-aggregate-to-edit argo-rollouts-aggregate-to-view --ignore-not-found 2>/dev/null || true +kubectl delete clusterrolebinding argo-rollouts --ignore-not-found 2>/dev/null || true + +echo "" +echo "=== E2E Cluster Cleanup Complete ===" diff --git a/scripts/e2e-cluster-setup.sh b/scripts/e2e-cluster-setup.sh new file mode 100644 index 0000000..eec7052 --- /dev/null +++ b/scripts/e2e-cluster-setup.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# Setup script for e2e test cluster +# Run this before running e2e tests: ./scripts/e2e-cluster-setup.sh +# This installs Argo Rollouts and any other prerequisites needed for e2e tests. + +set -e + +ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}" +ARGO_ROLLOUTS_NAMESPACE="argo-rollouts" + +echo "=== E2E Cluster Setup ===" + +# Check if kubectl is available +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed or not in PATH" + exit 1 +fi + +# Check cluster connectivity +echo "Checking cluster connectivity..." +if ! kubectl cluster-info &> /dev/null; then + echo "Error: Cannot connect to Kubernetes cluster" + exit 1 +fi +echo "Cluster connectivity verified" + +# Install Argo Rollouts +echo "" +echo "=== Installing Argo Rollouts ${ARGO_ROLLOUTS_VERSION} ===" + +# Check if Argo Rollouts is already installed +if kubectl get crd rollouts.argoproj.io &> /dev/null; then + echo "Argo Rollouts CRD already exists, checking if controller is running..." + if kubectl get deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} &> /dev/null; then + echo "Argo Rollouts is already installed and running" + else + echo "Argo Rollouts CRD exists but controller not running, reinstalling..." + fi +else + echo "Installing Argo Rollouts..." 
+fi + +# Create namespace (ignore if exists) +kubectl create namespace ${ARGO_ROLLOUTS_NAMESPACE} 2>/dev/null || true + +# Install Argo Rollouts +ARGO_URL="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml" +echo "Applying manifest from: ${ARGO_URL}" +kubectl apply -n ${ARGO_ROLLOUTS_NAMESPACE} -f ${ARGO_URL} + +# Wait for deployment to exist +echo "Waiting for deployment to be created..." +sleep 2 + +# Patch deployment to remove resource requirements (for Kind cluster compatibility) +# This avoids "Insufficient ephemeral-storage" errors in resource-constrained environments +echo "Patching deployment for Kind compatibility..." +PATCH_JSON='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' +if ! kubectl patch deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --type=json -p "${PATCH_JSON}" 2>/dev/null; then + echo "JSON patch failed, trying strategic merge..." + PATCH_JSON='{"spec":{"template":{"spec":{"containers":[{"name":"argo-rollouts","resources":{"limits":null,"requests":null}}]}}}}' + kubectl patch deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --type=strategic -p "${PATCH_JSON}" || echo "Warning: Failed to patch resources" +fi + +# Wait for controller to be ready +echo "Waiting for Argo Rollouts controller to be ready..." +kubectl wait --for=condition=available deployment/argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --timeout=180s + +# Wait for CRD to be established +echo "Waiting for Argo Rollouts CRD to be established..." +kubectl wait --for=condition=established crd/rollouts.argoproj.io --timeout=60s + +echo "" +echo "=== E2E Cluster Setup Complete ===" +echo "Argo Rollouts ${ARGO_ROLLOUTS_VERSION} is installed and ready" +echo "" +echo "You can now run e2e tests:" +echo " make e2e-test" +echo " # or" +echo " SKIP_BUILD=true RELOADER_IMAGE=ghcr.io/stakater/reloader:test go test -v ./test/e2e/..." 
diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 0000000..ca57b11 --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,457 @@ +# Reloader E2E Tests + +These tests verify that Reloader actually works in a real Kubernetes cluster. They spin up a Kind cluster, build and deploy Reloader, then create workloads and change their ConfigMaps/Secrets to make sure everything reloads correctly. + +## Running the Tests + +```bash +# Run everything (creates Kind cluster, builds image, runs tests) +make e2e + +# Test a specific image without building +SKIP_BUILD=true RELOADER_IMAGE=stakater/reloader:v1.0.0 make e2e + +# Run just one test suite +go test -v -timeout 30m ./test/e2e/core/... +go test -v -timeout 30m ./test/e2e/annotations/... +go test -v -timeout 30m ./test/e2e/flags/... + +# Skip Argo/OpenShift tests (if you don't have them installed) +go test -v ./test/e2e/core/... --ginkgo.label-filter="!argo && !openshift" +``` + +## What You Need + +- Go 1.21+ +- Docker +- [Kind](https://kind.sigs.k8s.io/) +- kubectl +- Helm 3 +- Argo Rollouts (optional, for Argo tests) +- OpenShift (optional, for DeploymentConfig tests) + +--- + +## What Gets Tested + +### Deployments + +Deployments are the most thoroughly tested workload. 
Here's everything we verify: + +**Basic Reload Behavior** +- Reloads when a referenced ConfigMap's data changes +- Reloads when a referenced Secret's data changes +- Reloads when using `auto=true` annotation (auto-detects all mounted ConfigMaps/Secrets) +- Does NOT reload when only ConfigMap/Secret labels change (data must change) +- Does NOT reload when `auto=false` is set + +**Different Ways to Reference ConfigMaps/Secrets** +- `envFrom` - inject all keys as environment variables +- `valueFrom.configMapKeyRef` - single key as env var +- `valueFrom.secretKeyRef` - single key as env var +- Volume mounts - mount ConfigMap/Secret as files +- Projected volumes - multiple sources combined into one mount +- Init containers with envFrom +- Init containers with volume mounts + +**Annotation Variations** +- `configmap.reloader.stakater.com/reload: my-config` - explicit ConfigMap +- `secret.reloader.stakater.com/reload: my-secret` - explicit Secret +- `reloader.stakater.com/auto: "true"` - auto-detect everything +- `configmap.reloader.stakater.com/auto: "true"` - auto-detect only ConfigMaps +- `secret.reloader.stakater.com/auto: "true"` - auto-detect only Secrets +- Multiple ConfigMaps/Secrets in one annotation (comma-separated) +- Annotations on pod template vs deployment metadata (both work) + +**Search & Match** +- Deployments with `search` annotation find ConfigMaps with `match` annotation +- Only reloads if both sides have the right annotations + +**Exclude & Ignore** +- Exclude specific ConfigMaps/Secrets from auto-reload +- Ignore annotation on ConfigMap/Secret prevents any reload + +**Pause Period** +- Deployment gets paused after reload when pause-period annotation is set + +**Regex Patterns** +- Pattern matching for ConfigMap/Secret names (e.g., `app-config-.*`) + +**Multi-Container** +- Works when multiple containers share the same ConfigMap +- Works when different containers use different ConfigMaps + +**EnvVars Strategy** +- Adds `STAKATER_` environment 
variables instead of pod annotations +- Verifies the env var appears after ConfigMap/Secret change + +### DaemonSets + +DaemonSets get the same treatment as Deployments: + +- Reloads when ConfigMap data changes +- Reloads when Secret data changes +- Works with `auto=true` annotation +- Does NOT reload on label-only changes +- Supports all reference methods (envFrom, valueFrom, volumes, projected, init containers) +- EnvVars strategy works + +### StatefulSets + +StatefulSets are tested identically to Deployments and DaemonSets: + +- Reloads when ConfigMap data changes +- Reloads when Secret data changes +- Works with `auto=true` annotation +- Does NOT reload on label-only changes +- Supports all reference methods +- EnvVars strategy works + +### CronJobs + +CronJobs are a bit special - when a CronJob's ConfigMap changes, Reloader updates the CronJob spec so the *next* Job it creates will have the new config. + +**What's Tested** +- CronJob spec updates when referenced ConfigMap changes +- CronJob spec updates when referenced Secret changes +- Works with `auto=true` annotation +- Works with explicit reload annotations +- Does NOT update on label-only changes + +**Note:** CronJobs don't support the EnvVars strategy since they don't have running pods to inject env vars into. + +### Jobs + +Jobs require special handling - since you can't modify a running Job, Reloader deletes and recreates it with the new config. + +**What's Tested** +- Job gets recreated (new UID) when ConfigMap changes +- Job gets recreated when Secret changes +- Works with `auto=true` annotation +- Works with explicit reload annotations +- Works with `valueFrom.configMapKeyRef` references +- Works with `valueFrom.secretKeyRef` references + +**Note:** Jobs don't support the EnvVars strategy. + +### Argo Rollouts + +Argo Rollouts are Kubernetes Deployments on steroids with advanced deployment strategies. Tests require Argo Rollouts to be installed. 
+ +**What's Tested** +- Reloads when ConfigMap data changes +- Reloads when Secret data changes +- Works with `auto=true` annotation +- Does NOT reload on label-only changes +- Default strategy (annotation-based, like Deployments) +- Restart strategy (sets `spec.restartAt` field instead of annotations) +- Supports all reference methods +- EnvVars strategy works + +### DeploymentConfigs (OpenShift) + +OpenShift's legacy workload type. Tests only run on OpenShift clusters. + +**What's Tested** +- Reloads when ConfigMap data changes +- Reloads when Secret data changes +- Works with `auto=true` annotation +- Does NOT reload on label-only changes +- Supports all reference methods +- EnvVars strategy works + +--- + +## CLI Flag Tests + +These tests verify Reloader's command-line options work correctly. Each test deploys Reloader with different flags. + +### Namespace Filtering + +**`namespaceSelector`** +- Only watches namespaces with matching labels +- Ignores ConfigMap changes in non-matching namespaces + +**`ignoreNamespaces`** +- Skips specified namespaces entirely +- Still watches all other namespaces + +**`watchGlobally`** +- `true` (default): watches all namespaces +- `false`: only watches Reloader's own namespace + +### Resource Filtering + +**`resourceLabelSelector`** +- Only watches ConfigMaps/Secrets with matching labels +- Ignores changes to resources without the label + +**`ignoreSecrets`** +- Completely ignores all Secret changes +- Still watches ConfigMaps + +**`ignoreConfigMaps`** +- Completely ignores all ConfigMap changes +- Still watches Secrets + +### Workload Filtering + +**`ignoreCronJobs`** +- Skips CronJobs, still handles Deployments/etc + +**`ignoreJobs`** +- Skips Jobs, still handles other workloads + +### Reload Triggers + +**`reloadOnCreate`** +- `true`: triggers reload when a new ConfigMap/Secret is created +- `false` (default): only triggers on updates + +**`reloadOnDelete`** +- `true`: triggers reload when a ConfigMap/Secret is deleted +- 
`false` (default): only triggers on updates + +### Global Auto-Reload + +**`autoReloadAll`** +- `true`: all workloads auto-reload without needing annotations +- `auto=false` on a workload still opts it out + +--- + +## Annotation-Specific Tests + +### Auto Reload Variations + +- `reloader.stakater.com/auto: "true"` - watches both ConfigMaps and Secrets +- `reloader.stakater.com/auto: "false"` - completely disables reload +- `configmap.reloader.stakater.com/auto: "true"` - only watches ConfigMaps +- `secret.reloader.stakater.com/auto: "true"` - only watches Secrets + +### Combining Annotations + +- `auto=true` + explicit reload annotation work together +- Auto-detected resources + explicitly listed resources both trigger reload +- Exclude annotations override auto-detection + +### Search & Match + +The search/match system lets you decouple workloads from specific resource names: + +1. Workload has `reloader.stakater.com/search: "true"` +2. ConfigMap has `reloader.stakater.com/match: "true"` +3. 
When ConfigMap changes, workload reloads + +**Tests verify:** +- Reload happens when both annotations present +- No reload when workload has search but ConfigMap lacks match +- No reload when ConfigMap has match but no workload has search +- Multiple workloads can have search, only ones with search reload + +### Exclude Annotations + +Exclude specific resources from auto-reload: + +- `configmap.reloader.stakater.com/exclude: "config-to-skip"` +- `secret.reloader.stakater.com/exclude: "secret-to-skip"` + +**Tests verify:** +- Excluded ConfigMap changes don't trigger reload +- Non-excluded ConfigMap changes still trigger reload +- Same behavior for Secrets + +### Resource Ignore + +Put this on the ConfigMap/Secret itself to prevent any reload: + +- `reloader.stakater.com/ignore: "true"` + +**Tests verify:** +- ConfigMap with ignore annotation never triggers reload +- Secret with ignore annotation never triggers reload +- Even with explicit reload annotation on workload + +### Pause Period + +Delay between detecting change and triggering reload: + +- `reloader.stakater.com/pause-period: "10s"` + +**Tests verify:** +- Deployment gets paused-at annotation after reload +- Without pause-period, no paused-at annotation + +--- + +## Advanced Scenarios + +### Pod Template Annotations + +Reloader reads annotations from both places: + +1. Deployment/DaemonSet/etc metadata +2. 
Pod template metadata (inside spec.template.metadata) + +**Tests verify:** +- Annotation only on pod template still works +- Annotation on both locations works +- Mismatched annotations (ConfigMap annotation but updating Secret) correctly doesn't reload + +### Regex Patterns + +Use regex in the reload annotation: + +- `configmap.reloader.stakater.com/reload: "app-config-.*"` +- `secret.reloader.stakater.com/reload: "db-creds-.*"` + +**Tests verify:** +- Matching ConfigMap/Secret triggers reload +- Non-matching ConfigMap/Secret doesn't trigger reload + +### Multiple Containers + +**Tests verify:** +- Multiple containers sharing one ConfigMap - changes trigger reload +- Multiple containers with different ConfigMaps - change to either triggers reload + +--- + +## Test Organization + +``` +test/e2e/ +├── core/ # Main tests (all workload types) +│ ├── workloads_test.go # Basic reload behavior +│ └── reference_methods_test.go # envFrom, volumes, etc. +├── annotations/ # Annotation-specific behavior +│ ├── auto_reload_test.go +│ ├── combination_test.go +│ ├── exclude_test.go +│ ├── search_match_test.go +│ ├── pause_period_test.go +│ └── resource_ignore_test.go +├── flags/ # CLI flag behavior +│ ├── namespace_selector_test.go +│ ├── namespace_ignore_test.go +│ ├── resource_selector_test.go +│ ├── ignore_resources_test.go +│ ├── ignored_workloads_test.go +│ ├── auto_reload_all_test.go +│ ├── reload_on_create_test.go +│ ├── reload_on_delete_test.go +│ └── watch_globally_test.go +├── advanced/ # Edge cases +│ ├── job_reload_test.go +│ ├── multi_container_test.go +│ ├── pod_annotations_test.go +│ └── regex_test.go +├── argo/ # Argo Rollouts (requires installation) +│ └── rollout_test.go +├── openshift/ # OpenShift (requires cluster) +│ └── deploymentconfig_test.go +└── utils/ # Shared test helpers +``` + +--- + +## Debugging Failed Tests + +### See What's Happening + +```bash +# Verbose output +go test -v ./test/e2e/core/... 
+ +# Run one specific test +go test -v ./test/e2e/core/... --ginkgo.focus="should reload when ConfigMap" + +# Keep the cluster around after tests +SKIP_CLEANUP=true make e2e +``` + +### Check Reloader Logs + +```bash +# Find the Reloader pod +kubectl get pods -A | grep reloader + +# Check its logs (replace <namespace> with the namespace found above) +kubectl logs -n <namespace> -l app=reloader-reloader --tail=100 +``` + +### Common Problems + +| Problem | Solution | +|---------|----------| +| Test timeout | Reloader might not be running - check pod status | +| Argo tests skipped | Install Argo Rollouts first | +| OpenShift tests skipped | Only work on OpenShift clusters | +| "resource not found" | Missing CRDs (Argo, OpenShift) | + +--- + +## Environment Variables + +| Variable | What it does | Default | +|----------|--------------|---------| +| `RELOADER_IMAGE` | Image to test | `ghcr.io/stakater/reloader:test` | +| `SKIP_BUILD` | Don't build the image | `false` | +| `SKIP_CLEANUP` | Keep cluster after tests | `false` | +| `KIND_CLUSTER` | Kind cluster name | `kind` | +| `KUBECONFIG` | Kubernetes config path | `~/.kube/config` | + +--- + +## Writing New Tests + +### For Multiple Workload Types + +Use the adapter pattern to test the same behavior across Deployments, DaemonSets, etc: + +```go +DescribeTable("should reload when ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + // ... 
create ConfigMap, workload, update ConfigMap, verify reload + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), +) +``` + +### For Deployment-Only Tests + +Use the direct creation helpers: + +```go +It("should reload with my specific setup", func() { + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + + // Update and verify... +}) +``` + +### Negative Tests (Verifying Nothing Happens) + +```go +It("should NOT reload when only labels change", func() { + // Setup... + + // Make a change that shouldn't trigger reload + err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"new-label": "value"}) + + // Wait a bit, then verify NO reload happened + time.Sleep(utils.NegativeTestWait) + reloaded, _ := utils.WaitForDeploymentReloaded(...) + Expect(reloaded).To(BeFalse()) +}) +``` diff --git a/test/e2e/advanced/advanced_suite_test.go b/test/e2e/advanced/advanced_suite_test.go new file mode 100644 index 0000000..b6cb6e6 --- /dev/null +++ b/test/e2e/advanced/advanced_suite_test.go @@ -0,0 +1,51 @@ +package advanced + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + "k8s.io/client-go/kubernetes" +) + +var ( + kubeClient kubernetes.Interface + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestAdvanced(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Advanced E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + // Setup test environment + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-advanced") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + // Export for use in tests + kubeClient = testEnv.KubeClient + testNamespace = testEnv.Namespace + + // Deploy Reloader with annotations strategy + err = testEnv.DeployAndWait(map[string]string{ + "reloader.reloadStrategy": "annotations", + }) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + GinkgoWriter.Println("Advanced E2E Suite cleanup complete") +}) diff --git a/test/e2e/advanced/job_reload_test.go b/test/e2e/advanced/job_reload_test.go new file mode 100644 index 0000000..e2d1350 --- /dev/null +++ b/test/e2e/advanced/job_reload_test.go @@ -0,0 +1,187 @@ +package advanced + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Job Workload Recreation Tests", func() { + var ( + jobName string + configMapName string + secretName string + ) + + BeforeEach(func() { + jobName = utils.RandName("job") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + }) + + AfterEach(func() { + _ = utils.DeleteJob(ctx, kubeClient, testNamespace, jobName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + Context("Job with ConfigMap reference", func() { + It("should recreate Job when referenced ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"JOB_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with ConfigMap envFrom") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobConfigMapEnvFrom(configMapName), + utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to exist") + err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"JOB_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName, + originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when ConfigMap changes") + }) + }) + + Context("Job with Secret reference", func() { + It("should recreate Job when referenced 
Secret changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"JOB_SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with Secret envFrom") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobSecretEnvFrom(secretName), + utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to exist") + err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"JOB_SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName, + originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when Secret changes") + }) + }) + + Context("Job with auto annotation", func() { + It("should recreate Job with auto=true when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"AUTO_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with auto annotation") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobConfigMapEnvFrom(configMapName), + utils.WithJobAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to exist") + err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + 
By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"AUTO_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName, + originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job with auto=true should be recreated when ConfigMap changes") + }) + }) + + Context("Job with valueFrom ConfigMap reference", func() { + It("should recreate Job when ConfigMap referenced via valueFrom changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with valueFrom.configMapKeyRef") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobConfigMapKeyRef(configMapName, "config_key", "MY_CONFIG"), + utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to exist") + err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName, + originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job with valueFrom.configMapKeyRef should be recreated when ConfigMap changes") + }) + }) + + Context("Job with valueFrom Secret reference", func() { + It("should recreate Job when 
Secret referenced via valueFrom changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Job with valueFrom.secretKeyRef") + job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName, + utils.WithJobSecretKeyRef(secretName, "secret_key", "MY_SECRET"), + utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + originalUID := string(job.UID) + + By("Waiting for Job to exist") + err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Job to be recreated (new UID)") + _, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName, + originalUID, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(recreated).To(BeTrue(), "Job with valueFrom.secretKeyRef should be recreated when Secret changes") + }) + }) +}) diff --git a/test/e2e/advanced/multi_container_test.go b/test/e2e/advanced/multi_container_test.go new file mode 100644 index 0000000..1b77c41 --- /dev/null +++ b/test/e2e/advanced/multi_container_test.go @@ -0,0 +1,94 @@ +package advanced + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Multi-Container Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) + }) + + Context("Multiple containers same ConfigMap", func() { + It("should reload when ConfigMap used by multiple containers changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"shared-key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple containers using the same ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithMultipleContainers(2), + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"shared-key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with multiple containers should 
be reloaded") + }) + }) + + Context("Multiple containers different ConfigMaps", func() { + It("should reload when any container's ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "initial1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple containers using different ConfigMaps") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithMultipleContainersAndEnv(configMapName, configMapName2), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the first ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "updated1"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when first container's ConfigMap changes") + }) + }) +}) diff --git a/test/e2e/advanced/pod_annotations_test.go b/test/e2e/advanced/pod_annotations_test.go new file mode 100644 index 0000000..25b8419 --- /dev/null +++ b/test/e2e/advanced/pod_annotations_test.go @@ -0,0 +1,191 @@ +package advanced + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Pod Template Annotations Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + Context("Annotations on pod template metadata only", func() { + It("should reload when using annotation on pod template metadata (not deployment metadata)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"POD_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation ONLY on pod template") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + // Note: No WithAnnotations - annotation only on pod template + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"POD_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + 
Expect(reloaded).To(BeTrue(), "Deployment should reload when annotation is on pod template metadata") + }) + }) + + Context("Annotations on both deployment and pod template metadata", func() { + It("should reload when annotations are on both deployment and pod template", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"BOTH_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation on BOTH deployment and pod template") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"BOTH_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when annotations are on both locations") + }) + }) + + Context("auto=true annotation on pod template", func() { + It("should reload when auto annotation is on pod template metadata", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"AUTO_POD_CONFIG": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a 
Deployment with auto=true annotation on pod template") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithPodTemplateAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"AUTO_POD_CONFIG": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true on pod template should reload") + }) + }) + + Context("Secret annotation on pod template", func() { + It("should reload when secret reload annotation is on pod template", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"POD_SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with secret reload annotation on pod template") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithPodTemplateAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + 
map[string]string{"POD_SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when secret annotation is on pod template") + }) + }) + + Context("Mismatched annotations (different resources)", func() { + It("should NOT reload when pod template has ConfigMap annotation but we update Secret", func() { + By("Creating both ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"CONFIG": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap annotation on pod template but using Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret (not the ConfigMap)") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when we update different resource than annotated") + }) + }) +}) diff --git a/test/e2e/advanced/regex_test.go b/test/e2e/advanced/regex_test.go new file mode 100644 index 0000000..67efe97 --- /dev/null +++ b/test/e2e/advanced/regex_test.go @@ -0,0 +1,134 @@ +package advanced + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Regex Pattern Tests", func() { + var ( + deploymentName string + matchingCM string + nonMatchingCM string + matchingSecret string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + matchingCM = "app-config-" + utils.RandName("cm") + nonMatchingCM = "other-" + utils.RandName("cm") + matchingSecret = "app-secret-" + utils.RandName("secret") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, matchingCM) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, matchingSecret) + }) + + Context("ConfigMap regex pattern", func() { + It("should reload when ConfigMap matching pattern changes", func() { + By("Creating a ConfigMap matching the pattern") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(map[string]string{ + utils.AnnotationConfigMapReload: "app-config-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, 
deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the matching ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, matchingCM, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching ConfigMap changes") + }) + + It("should NOT reload when ConfigMap NOT matching pattern changes", func() { + By("Creating ConfigMaps - one matching, one not") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM, + map[string]string{"other": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(map[string]string{ + utils.AnnotationConfigMapReload: "app-config-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-matching ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM, + map[string]string{"other": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (pattern mismatch)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + 
utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when non-matching ConfigMap changes") + }) + }) + + Context("Secret regex pattern", func() { + It("should reload when Secret matching pattern changes", func() { + By("Creating a Secret matching the pattern") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, matchingSecret, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with Secret pattern annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(matchingSecret), + utils.WithAnnotations(map[string]string{ + utils.AnnotationSecretReload: "app-secret-.*", + }), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the matching Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, matchingSecret, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching Secret changes") + }) + }) +}) diff --git a/test/e2e/annotations/annotations_suite_test.go b/test/e2e/annotations/annotations_suite_test.go new file mode 100644 index 0000000..a500b04 --- /dev/null +++ b/test/e2e/annotations/annotations_suite_test.go @@ -0,0 +1,59 @@ +package annotations + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" +) + +var ( + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + testNamespace string + ctx context.Context + cancel context.CancelFunc + testEnv *utils.TestEnvironment +) + +func TestAnnotations(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Annotations Strategy E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + // Setup test environment + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-annotations-test") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + // Export for use in tests + kubeClient = testEnv.KubeClient + dynamicClient = testEnv.DynamicClient + testNamespace = testEnv.Namespace + + // Deploy Reloader with annotations strategy + err = testEnv.DeployAndWait(map[string]string{ + "reloader.reloadStrategy": "annotations", + }) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + if cancel != nil { + cancel() + } + + GinkgoWriter.Println("Annotations E2E Suite cleanup complete") +}) diff --git a/test/e2e/annotations/auto_reload_test.go b/test/e2e/annotations/auto_reload_test.go new file mode 100644 index 0000000..baa0e92 --- /dev/null +++ b/test/e2e/annotations/auto_reload_test.go @@ -0,0 +1,269 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Auto Reload Annotation Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + Context("with reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment when any referenced ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded") + }) + + It("should reload Deployment when any 
referenced Secret changes", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for Secret change") + }) + + It("should reload Deployment when either ConfigMap or Secret changes", func() { + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true annotation referencing both") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + 
utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for ConfigMap change") + }) + }) + + Context("with reloader.stakater.com/auto=false annotation", func() { + It("should NOT reload Deployment when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=false annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoFalseAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment is NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, 
testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT have been reloaded") + }) + }) + + Context("with configmap.reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment only when ConfigMap changes, not Secret", func() { + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with configmap auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildConfigMapAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for ConfigMap change") + }) + }) + + Context("with secret.reloader.stakater.com/auto=true annotation", func() { + It("should reload Deployment only when Secret changes, not ConfigMap", func() { + By("Creating a 
ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with secret auto=true annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildSecretAutoAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for Secret change") + }) + }) + + Context("with auto annotation and explicit reload annotation together", func() { + It("should reload when auto-detected resource changes", func() { + configMapName2 := utils.RandName("cm2") + defer func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) }() + + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + 
map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and explicit reload for first ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second ConfigMap (auto-detected)") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for auto-detected ConfigMap change") + }) + }) +}) diff --git a/test/e2e/annotations/combination_test.go b/test/e2e/annotations/combination_test.go new file mode 100644 index 0000000..3d13d7a --- /dev/null +++ b/test/e2e/annotations/combination_test.go @@ -0,0 +1,352 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Combination Annotation Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + secretName string + secretName2 string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + secretName = utils.RandName("secret") + secretName2 = utils.RandName("secret2") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) + }) + + Context("auto=true with explicit reload annotations", func() { + It("should reload when both auto-detected and explicitly listed ConfigMaps change", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"extra": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), // auto-detected + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName2), // explicitly listed + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, 
utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the auto-detected ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when auto-detected ConfigMap changes") + }) + + It("should reload when explicitly listed ConfigMap changes with auto=true", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"extra": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), // auto-detected + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapReloadAnnotation(configMapName2), // explicitly listed + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the explicitly listed ConfigMap (not mounted)") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"extra": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := 
utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed ConfigMap changes") + }) + + It("should reload when Secret changes with auto=true and explicit Secret annotation", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"api-key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND explicit reload annotation for extra Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), // auto-detected + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretReloadAnnotation(secretName2), // explicitly listed + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the explicitly listed Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"api-key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed Secret changes") + }) + }) + + Context("auto=true with exclude annotations", func() { + It("should 
NOT reload when excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), // also mounted, but excluded + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName2), // exclude this one + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"excluded": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes") + }) + + It("should reload when non-excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, 
testNamespace, configMapName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes") + }) + + It("should NOT reload when excluded Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"excluded": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true AND exclude for second Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + 
utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName2), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"excluded": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded Secret changes") + }) + }) + + Context("multiple explicit references", func() { + It("should reload when any of multiple explicitly listed ConfigMaps change", func() { + By("Creating multiple ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple ConfigMaps in reload annotation (comma-separated)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second 
ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed ConfigMaps changes") + }) + + It("should reload when any of multiple explicitly listed Secrets change", func() { + By("Creating multiple Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"key1": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"key2": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with multiple Secrets in reload annotation (comma-separated)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName, secretName2)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the first Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"key1": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed Secrets changes") + }) + + 
It("should reload when both ConfigMap and Secret annotations are present", func() { + By("Creating a ConfigMap and a Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with both ConfigMap and Secret reload annotations") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when Secret changes with both annotations present") + }) + }) +}) diff --git a/test/e2e/annotations/exclude_test.go b/test/e2e/annotations/exclude_test.go new file mode 100644 index 0000000..831895d --- /dev/null +++ b/test/e2e/annotations/exclude_test.go @@ -0,0 +1,196 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Exclude Annotation Tests", func() { + var ( + deploymentName string + configMapName string + configMapName2 string + secretName string + secretName2 string + excludeNS string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + configMapName2 = utils.RandName("cm2") + secretName = utils.RandName("secret") + secretName2 = utils.RandName("secret2") + excludeNS = "exclude-" + utils.RandName("ns") + + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, excludeNS) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, excludeNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, excludeNS, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, excludeNS, configMapName2) + _ = utils.DeleteSecret(ctx, kubeClient, excludeNS, secretName) + _ = utils.DeleteSecret(ctx, kubeClient, excludeNS, secretName2) + _ = utils.DeleteNamespace(ctx, kubeClient, excludeNS) + }) + + Context("ConfigMap exclude annotation", func() { + It("should NOT reload when excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and configmaps.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName), + )), + ) + 
Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, excludeNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (excluded ConfigMap)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes") + }) + + It("should reload when non-excluded ConfigMap changes", func() { + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName2, + map[string]string{"key2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and configmaps.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithConfigMapEnvFrom(configMapName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildConfigMapExcludeAnnotation(configMapName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, excludeNS, configMapName2, + map[string]string{"key2": 
"updated2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes") + }) + }) + + Context("Secret exclude annotation", func() { + It("should NOT reload when excluded Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2, + map[string]string{"password2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secrets.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, excludeNS, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (excluded Secret)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), 
"Deployment should NOT reload when excluded Secret changes") + }) + + It("should reload when non-excluded Secret changes", func() { + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2, + map[string]string{"password2": "initial2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=true and secrets.exclude annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithSecretEnvFrom(secretName2), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildAutoTrueAnnotation(), + utils.BuildSecretExcludeAnnotation(secretName), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the non-excluded Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2, + map[string]string{"password2": "updated2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded Secret changes") + }) + }) +}) diff --git a/test/e2e/annotations/pause_period_test.go b/test/e2e/annotations/pause_period_test.go new file mode 100644 index 0000000..225ce0a --- /dev/null +++ b/test/e2e/annotations/pause_period_test.go @@ -0,0 +1,102 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Pause Period Tests", func() { + var ( + deploymentName string + configMapName string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + Context("with pause-period annotation", func() { + It("should pause Deployment after reload", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with pause-period annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildPausePeriodAnnotation("10s"), + )), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded") + + By("Verifying Deployment has paused-at annotation") + paused, err := utils.WaitForDeploymentPaused(ctx, kubeClient, testNamespace, deploymentName, + 
"utils.AnnotationDeploymentPausedAt", utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(paused).To(BeTrue(), "Deployment should have paused-at annotation after reload") + }) + + It("should NOT pause Deployment without pause-period annotation", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT pause-period annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded") + + By("Verifying Deployment does NOT have paused-at annotation") + time.Sleep(utils.NegativeTestWait) + paused, err := utils.WaitForDeploymentPaused(ctx, kubeClient, testNamespace, deploymentName, + "utils.AnnotationDeploymentPausedAt", utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(paused).To(BeFalse(), "Deployment should NOT have paused-at annotation without pause-period") + }) + }) +}) diff --git a/test/e2e/annotations/resource_ignore_test.go b/test/e2e/annotations/resource_ignore_test.go new file mode 100644 
index 0000000..d6ed661 --- /dev/null +++ b/test/e2e/annotations/resource_ignore_test.go @@ -0,0 +1,93 @@ +package annotations + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Resource Ignore Annotation Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + Context("with reloader.stakater.com/ignore annotation on resource", func() { + It("should NOT reload when ConfigMap has ignore=true annotation", func() { + By("Creating a ConfigMap with ignore=true annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildIgnoreAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap reference annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + 
time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap has ignore=true") + }) + + It("should NOT reload when Secret has ignore=true annotation", func() { + By("Creating a Secret with ignore=true annotation") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, + utils.BuildIgnoreAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with Secret reference annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when Secret has ignore=true") + }) + }) +}) diff --git a/test/e2e/annotations/search_match_test.go b/test/e2e/annotations/search_match_test.go new file mode 100644 index 0000000..73868c8 --- /dev/null +++ b/test/e2e/annotations/search_match_test.go @@ -0,0 +1,169 @@ +package annotations + +import ( + 
"time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Search and Match Annotation Tests", func() { + var ( + deploymentName string + configMapName string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + Context("with search and match annotations", func() { + It("should reload when workload has search annotation and ConfigMap has match annotation", func() { + By("Creating a ConfigMap with match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with search annotation should reload when ConfigMap has match annotation") + }) + + It("should NOT reload when workload 
has search but ConfigMap has no match", func() { + By("Creating a ConfigMap WITHOUT match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap lacks match annotation") + }) + + It("should NOT reload when resource has match but no Deployment has search", func() { + By("Creating a ConfigMap WITH match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT search annotation (only standard annotation)") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + // Note: No search or reload annotation - deployment won't be affected by match + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting 
for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment without search annotation should NOT reload even when ConfigMap has match") + }) + + It("should reload only the deployment with search annotation when multiple deployments use same ConfigMap", func() { + deploymentName2 := utils.RandName("deploy2") + defer func() { + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName2) + }() + + By("Creating a ConfigMap with match annotation") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, + utils.BuildMatchAnnotation()) + Expect(err).NotTo(HaveOccurred()) + + By("Creating first Deployment WITH search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildSearchAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Creating second Deployment WITHOUT search annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName2, + utils.WithConfigMapEnvFrom(configMapName), + // No search annotation + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for both Deployments to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + 
Expect(err).NotTo(HaveOccurred()) + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName2, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment with search annotation should reload") + + By("Verifying second Deployment was NOT reloaded") + reloaded2, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName2, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded2).To(BeFalse(), "Deployment without search annotation should NOT reload") + }) + }) +}) diff --git a/test/e2e/argo/argo_suite_test.go b/test/e2e/argo/argo_suite_test.go new file mode 100644 index 0000000..d3071ee --- /dev/null +++ b/test/e2e/argo/argo_suite_test.go @@ -0,0 +1,66 @@ +package argo + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" +) + +var ( + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestArgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Argo Rollouts E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + // Setup test environment + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-argo") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + // Export for use in tests + kubeClient = testEnv.KubeClient + dynamicClient = testEnv.DynamicClient + testNamespace = testEnv.Namespace + + // Check if Argo Rollouts is installed + // NOTE: Argo Rollouts should be pre-installed using: ./scripts/e2e-cluster-setup.sh + // This suite does NOT install Argo Rollouts to ensure consistent behavior across all test suites. + if !utils.IsArgoRolloutsInstalled(ctx, dynamicClient) { + Skip("Argo Rollouts is not installed. Run ./scripts/e2e-cluster-setup.sh first") + } + GinkgoWriter.Println("Argo Rollouts is installed") + + // Deploy Reloader with Argo Rollouts support + err = testEnv.DeployAndWait(map[string]string{ + "reloader.reloadStrategy": "annotations", + "reloader.isArgoRollouts": "true", + }) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + // Cleanup test environment (Reloader + namespace) + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + // NOTE: Argo Rollouts is NOT uninstalled here to allow other test suites (core/) + // to run Argo tests. 
Cleanup is handled by: ./scripts/e2e-cluster-cleanup.sh + GinkgoWriter.Println("Argo Rollouts E2E Suite cleanup complete (Argo Rollouts preserved for other suites)") +}) diff --git a/test/e2e/argo/rollout_test.go b/test/e2e/argo/rollout_test.go new file mode 100644 index 0000000..5542f42 --- /dev/null +++ b/test/e2e/argo/rollout_test.go @@ -0,0 +1,91 @@ +package argo + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +// Note: Basic Argo Rollout reload tests (ConfigMap, Secret, auto=true, volume mounts, label-only negative) +// are covered by core/workloads_test.go with Label("argo"). +// This file contains only Argo-specific tests that cannot be parameterized. + +var _ = Describe("Argo Rollout Strategy Tests", func() { + var ( + rolloutName string + configMapName string + ) + + BeforeEach(func() { + rolloutName = utils.RandName("rollout") + configMapName = utils.RandName("cm") + }) + + AfterEach(func() { + _ = utils.DeleteArgoRollout(ctx, dynamicClient, testNamespace, rolloutName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + }) + + // Argo Rollouts have a special "restart" strategy that sets spec.restartAt field + // instead of using pod template annotations. This is unique to Argo Rollouts. 
+ Context("Rollout strategy annotation", func() { + It("should use default rollout strategy (annotation-based reload)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating an Argo Rollout with auto=true (default strategy)") + err = utils.CreateArgoRollout(ctx, dynamicClient, testNamespace, rolloutName, + utils.WithRolloutConfigMapEnvFrom(configMapName), + utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be ready") + err = utils.WaitForRolloutReady(ctx, dynamicClient, testNamespace, rolloutName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be reloaded with annotation") + reloaded, err := utils.WaitForRolloutReloaded(ctx, dynamicClient, testNamespace, rolloutName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Argo Rollout should be reloaded with default rollout strategy") + }) + + It("should use restart strategy when specified (sets restartAt field)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating an Argo Rollout with restart strategy annotation") + // Note: auto annotation goes on pod template, rollout-strategy goes on object metadata + err = utils.CreateArgoRollout(ctx, dynamicClient, testNamespace, rolloutName, + utils.WithRolloutConfigMapEnvFrom(configMapName), + utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()), + 
utils.WithRolloutObjectAnnotations(utils.BuildRolloutRestartStrategyAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to be ready") + err = utils.WaitForRolloutReady(ctx, dynamicClient, testNamespace, rolloutName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Rollout to have restartAt field set") + restarted, err := utils.WaitForRolloutRestartAt(ctx, dynamicClient, testNamespace, rolloutName, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(restarted).To(BeTrue(), "Argo Rollout should have restartAt field set with restart strategy") + }) + }) +}) diff --git a/test/e2e/core/core_suite_test.go b/test/e2e/core/core_suite_test.go new file mode 100644 index 0000000..5564946 --- /dev/null +++ b/test/e2e/core/core_suite_test.go @@ -0,0 +1,89 @@ +package core + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" +) + + +var ( + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + testNamespace string + ctx context.Context + cancel context.CancelFunc + testEnv *utils.TestEnvironment + registry *utils.AdapterRegistry +) + +func TestCore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Core Workload E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + // Setup test environment + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-core-test") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + // Export for use in tests + kubeClient = testEnv.KubeClient + dynamicClient = testEnv.DynamicClient + testNamespace = testEnv.Namespace + + // Create adapter registry + registry = utils.NewAdapterRegistry(kubeClient, dynamicClient) + + // Register ArgoRolloutAdapter if Argo Rollouts is installed + if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) { + GinkgoWriter.Println("Argo Rollouts detected, registering ArgoRolloutAdapter") + registry.RegisterAdapter(utils.NewArgoRolloutAdapter(dynamicClient)) + } else { + GinkgoWriter.Println("Argo Rollouts not detected, skipping ArgoRolloutAdapter registration") + } + + // Register DeploymentConfigAdapter if OpenShift is available + if utils.HasDeploymentConfigSupport(testEnv.DiscoveryClient) { + GinkgoWriter.Println("OpenShift detected, registering DeploymentConfigAdapter") + registry.RegisterAdapter(utils.NewDeploymentConfigAdapter(dynamicClient)) + } else { + GinkgoWriter.Println("OpenShift not detected, skipping DeploymentConfigAdapter registration") + } + + // Deploy Reloader with default annotations strategy + // Individual test contexts will redeploy with different strategies if needed + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + } + + // Enable 
Argo Rollouts support if Argo is installed + if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) { + deployValues["reloader.isArgoRollouts"] = "true" + GinkgoWriter.Println("Deploying Reloader with Argo Rollouts support") + } + + err = testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader") +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + if cancel != nil { + cancel() + } + + GinkgoWriter.Println("Core E2E Suite cleanup complete") +}) diff --git a/test/e2e/core/reference_methods_test.go b/test/e2e/core/reference_methods_test.go new file mode 100644 index 0000000..38f52c5 --- /dev/null +++ b/test/e2e/core/reference_methods_test.go @@ -0,0 +1,528 @@ +package core + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reference Method Tests", func() { + var ( + configMapName string + secretName string + workloadName string + ) + + BeforeEach(func() { + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + workloadName = utils.RandName("workload") + }) + + AfterEach(func() { + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + // ============================================================ + // valueFrom.configMapKeyRef TESTS + // ============================================================ + Context("valueFrom.configMapKeyRef", func() { + DescribeTable("should reload when ConfigMap referenced via valueFrom.configMapKeyRef changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, 
kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "initial_value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with valueFrom.configMapKeyRef") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapKeyRef: true, + ConfigMapKey: "config_key", + EnvVarName: "MY_CONFIG_VAR", + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config_key": "updated_value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with valueFrom.configMapKeyRef should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // valueFrom.secretKeyRef TESTS + // ============================================================ + Context("valueFrom.secretKeyRef", func() { + DescribeTable("should reload when Secret referenced via valueFrom.secretKeyRef changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter 
not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "initial_secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with valueFrom.secretKeyRef") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretKeyRef: true, + SecretKey: "secret_key", + EnvVarName: "MY_SECRET_VAR", + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret_key": "updated_secret"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with valueFrom.secretKeyRef should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // PROJECTED VOLUME TESTS + // ============================================================ + Context("Projected Volumes", func() { + DescribeTable("should reload when ConfigMap in projected volume changes", + func(workloadType utils.WorkloadType) { + adapter := 
registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected ConfigMap volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseProjectedVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with projected ConfigMap volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret in projected volume changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := 
utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected Secret volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with projected Secret volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when ConfigMap changes in mixed projected volume", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = 
utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected volume containing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when ConfigMap in mixed projected volume changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret changes in mixed projected volume", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, 
kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with projected volume containing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseProjectedVolume: true, + Annotations: utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should reload when Secret in mixed projected volume changes", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // INIT CONTAINER TESTS + // ============================================================ + Context("Init Container with envFrom", 
func() { + DescribeTable("should reload when ConfigMap referenced by init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"INIT_VAR": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container referencing ConfigMap") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseInitContainer: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"INIT_VAR": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret referenced by init container changes", + func(workloadType utils.WorkloadType) { + adapter := 
registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"INIT_SECRET": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container referencing Secret") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseInitContainer: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"INIT_SECRET": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container Secret should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + Context("Init Container with Volume Mount", func() { + DescribeTable("should reload when ConfigMap volume mounted in init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", 
workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container using ConfigMap volume mount") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseInitContainerVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when Secret volume mounted in init container changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + 
map[string]string{"credentials": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with init container using Secret volume mount") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseInitContainerVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with init container Secret volume should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) + + // ============================================================ + // AUTO ANNOTATION WITH VALUEFROM TESTS + // ============================================================ + Context("Auto Annotation with valueFrom", func() { + DescribeTable("should reload with auto=true when ConfigMap referenced via valueFrom changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") 
+ _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"auto_config_key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true and valueFrom") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapKeyRef: true, + ConfigMapKey: "auto_config_key", + EnvVarName: "AUTO_CONFIG_VAR", + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"auto_config_key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with auto=true and valueFrom should reload", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + }) +}) diff --git a/test/e2e/core/workloads_test.go b/test/e2e/core/workloads_test.go new file mode 100644 index 0000000..4b49177 --- /dev/null +++ b/test/e2e/core/workloads_test.go @@ -0,0 +1,912 @@ +package core + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Workload Reload Tests", func() { + var ( + configMapName string + secretName string + workloadName string + ) + + BeforeEach(func() { + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + workloadName = utils.RandName("workload") + }) + + AfterEach(func() { + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName) + }) + + // ============================================================ + // ANNOTATIONS STRATEGY TESTS + // ============================================================ + Context("Annotations Strategy", func() { + // Standard workloads that support annotation-based reload + standardWorkloads := []utils.WorkloadType{ + utils.WorkloadDeployment, + utils.WorkloadDaemonSet, + utils.WorkloadStatefulSet, + } + + // ConfigMap reload tests for standard workloads + DescribeTable("should reload when ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the 
ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Secret reload tests for standard workloads + DescribeTable("should reload when Secret changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + 
By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Auto=true annotation tests + DescribeTable("should reload with auto=true annotation when ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with auto=true annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with auto=true should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Negative tests: label-only changes should NOT trigger reload + DescribeTable("should NOT reload when only ConfigMap labels change (no data change)", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the ConfigMap labels (no data change)") + err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when only ConfigMap labels change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should NOT reload when only Secret labels change (no data change)", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the Secret labels (no data change)") + err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName, + map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload was NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s should NOT reload when only 
Secret labels change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // CronJob special handling - triggers a Job instead of annotation + Context("CronJob (special handling)", func() { + var cronJobAdapter *utils.CronJobAdapter + + BeforeEach(func() { + adapter := registry.Get(utils.WorkloadCronJob) + Expect(adapter).NotTo(BeNil()) + var ok bool + cronJobAdapter, ok = adapter.(*utils.CronJobAdapter) + Expect(ok).To(BeTrue(), "Should be able to cast to CronJobAdapter") + }) + + It("should trigger a Job when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with ConfigMap reference annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation") + }) + + It("should trigger a Job when Secret changes", func() { + By("Creating a Secret") + _, err := 
utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with Secret reference annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation") + }) + + It("should trigger a Job with auto=true annotation when ConfigMap changes", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with auto=true annotation") + err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildAutoTrueAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) }) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for a Job to be created by CronJob reload") + triggered, err := 
cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(triggered).To(BeTrue(), "CronJob with auto=true should have triggered a Job creation") + }) + }) + + // Volume mount tests + DescribeTable("should reload when volume-mounted ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "setting: initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapVolume: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"config.yaml": "setting: updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with volume-mounted ConfigMap should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), 
utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should reload when volume-mounted Secret changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials.yaml": "secret: initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret volume") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretVolume: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"credentials.yaml": "secret: updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "%s with volume-mounted Secret should have been reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Test for workloads without Reloader annotation + 
DescribeTable("should NOT reload without Reloader annotation", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload WITHOUT Reloader annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + // No Reloader annotations + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload is NOT reloaded (negative test)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "%s without Reloader annotation should NOT be reloaded", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + // Variable to track for use in lint + _ = standardWorkloads + + // ============================================================ + // EDGE CASE TESTS (Deployment-specific) + // ============================================================ + Context("Edge Cases", func() { + It("should reload deployment with multiple ConfigMaps when 
any one changes", func() { + configMapName2 := utils.RandName("cm2") + defer func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) }() + + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating two ConfigMaps") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment referencing both ConfigMaps") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2, + map[string]string{"key2": "updated-value2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when second ConfigMap changed") + }) + + It("should reload deployment with multiple Secrets when any one changes", func() { + secretName2 := utils.RandName("secret2") + defer func() { _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) }() + + adapter := registry.Get(utils.WorkloadDeployment) + 
Expect(adapter).NotTo(BeNil()) + + By("Creating two Secrets") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"key1": "value1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"key2": "value2"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment referencing both Secrets") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName, secretName2), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the second Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2, + map[string]string{"key2": "updated-value2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when second Secret changed") + }) + + It("should reload deployment multiple times for sequential ConfigMap updates", func() { + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "v1"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: 
configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("First update to ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "v2"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for first reload") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue()) + + By("Getting first reload annotation value") + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, workloadName) + Expect(err).NotTo(HaveOccurred()) + firstReloadValue := deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + + By("Second update to ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "v3"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for second reload with different annotation value") + Eventually(func() string { + deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, workloadName) + if err != nil { + return "" + } + return deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom] + }, utils.ReloadTimeout, utils.DefaultInterval).ShouldNot( + Equal(firstReloadValue), + "Reload annotation should change after second update", + ) + }) + + It("should reload deployment when either ConfigMap or Secret changes", func() { + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating a ConfigMap and Secret") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + 
map[string]string{"config": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + _, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment referencing both") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + SecretName: secretName, + UseConfigMapEnvFrom: true, + UseSecretEnvFrom: true, + Annotations: utils.MergeAnnotations( + utils.BuildConfigMapReloadAnnotation(configMapName), + utils.BuildSecretReloadAnnotation(secretName), + ), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"secret": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when Secret changed") + }) + + It("should NOT reload deployment with auto=false annotation", func() { + adapter := registry.Get(utils.WorkloadDeployment) + Expect(adapter).NotTo(BeNil()) + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=false annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: 
utils.BuildAutoFalseAnnotation(), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for Deployment to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment is NOT reloaded (auto=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT have been reloaded") + }) + }) + }) + + // ============================================================ + // ENVVARS STRATEGY TESTS + // ============================================================ + Context("EnvVars Strategy", Label("envvars"), Ordered, func() { + // Redeploy Reloader with envvars strategy for this context + BeforeAll(func() { + By("Redeploying Reloader with envvars strategy") + deployValues := map[string]string{ + "reloader.reloadStrategy": "env-vars", + } + // Preserve Argo support if available + if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) { + deployValues["reloader.isArgoRollouts"] = "true" + } + err := testEnv.DeployAndWait(deployValues) + Expect(err).NotTo(HaveOccurred(), "Failed to redeploy Reloader with envvars strategy") + }) + + AfterAll(func() { + By("Restoring Reloader to annotations strategy") + deployValues := map[string]string{ + "reloader.reloadStrategy": "annotations", + } + // Preserve Argo support if available + if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) { + deployValues["reloader.isArgoRollouts"] = "true" + } + err := testEnv.DeployAndWait(deployValues) + 
Expect(err).NotTo(HaveOccurred(), "Failed to restore Reloader to annotations strategy") + }) + + // EnvVar workloads (CronJob does NOT support env var strategy) + envVarWorkloads := []utils.WorkloadType{ + utils.WorkloadDeployment, + utils.WorkloadDaemonSet, + utils.WorkloadStatefulSet, + } + + DescribeTable("should add STAKATER_ env var when ConfigMap changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap data") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, + utils.StakaterEnvVarPrefix, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after ConfigMap change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", 
utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + DescribeTable("should add STAKATER_ env var when Secret changes", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret data") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for workload to have STAKATER_ env var") + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, + utils.StakaterEnvVarPrefix, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after Secret change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + 
Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout), + Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig), + ) + + // Negative tests for env var strategy + DescribeTable("should NOT add STAKATER_ env var when only ConfigMap labels change", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with ConfigMap reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + ConfigMapName: configMapName, + UseConfigMapEnvFrom: true, + Annotations: utils.BuildConfigMapReloadAnnotation(configMapName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the ConfigMap labels") + err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, + utils.StakaterEnvVarPrefix, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + 
Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + DescribeTable("should NOT add STAKATER_ env var when only Secret labels change", + func(workloadType utils.WorkloadType) { + adapter := registry.Get(workloadType) + if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) } + + if !adapter.SupportsEnvVarStrategy() { + Skip("Workload type does not support env var strategy") + } + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName, + map[string]string{"password": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating workload with Secret reference annotation") + err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{ + SecretName: secretName, + UseSecretEnvFrom: true, + Annotations: utils.BuildSecretReloadAnnotation(secretName), + }) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) }) + + By("Waiting for workload to be ready") + err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating only the Secret labels") + err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName, + map[string]string{"new-label": "new-value"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying workload does NOT have STAKATER_ env var") + time.Sleep(utils.NegativeTestWait) + found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName, + utils.StakaterEnvVarPrefix, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType) + }, + Entry("Deployment", utils.WorkloadDeployment), + Entry("DaemonSet", utils.WorkloadDaemonSet), + Entry("StatefulSet", utils.WorkloadStatefulSet), + ) + + // Variable to track for use in lint + _ = envVarWorkloads + }) +}) diff --git a/test/e2e/e2e_suite_test.go 
b/test/e2e/e2e_suite_test.go new file mode 100644 index 0000000..b45374a --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,84 @@ +package e2e + +import ( + "context" + "fmt" + "os" + "os/exec" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + kubeClient kubernetes.Interface + projectDir string + testImage string + ctx context.Context + cancel context.CancelFunc +) + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Reloader E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx, cancel = context.WithCancel(context.Background()) + + // Get project directory + projectDir, err = utils.GetProjectDir() + Expect(err).NotTo(HaveOccurred(), "Failed to get project directory") + + // Get test image from environment or use default + testImage = utils.GetTestImage() + + GinkgoWriter.Printf("Using test image: %s\n", testImage) + GinkgoWriter.Printf("Project directory: %s\n", projectDir) + + // Build image if SKIP_BUILD is not set + if os.Getenv("SKIP_BUILD") != "true" { + GinkgoWriter.Println("Building Docker image...") + cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", testImage)) + output, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to build Docker image: %s", output) + GinkgoWriter.Println("Docker image built successfully") + } else { + GinkgoWriter.Println("Skipping Docker build (SKIP_BUILD=true)") + } + + // Load image to Kind cluster + GinkgoWriter.Println("Loading image to Kind cluster...") + err = utils.LoadImageToKindCluster(testImage) + Expect(err).NotTo(HaveOccurred(), "Failed to load image to Kind cluster") + GinkgoWriter.Println("Image loaded to Kind cluster successfully") + + // Setup Kubernetes client + kubeconfig := utils.GetKubeconfig() + GinkgoWriter.Printf("Using kubeconfig: 
%s\n", kubeconfig) + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + Expect(err).NotTo(HaveOccurred(), "Failed to build config from kubeconfig") + + kubeClient, err = kubernetes.NewForConfig(config) + Expect(err).NotTo(HaveOccurred(), "Failed to create Kubernetes client") + + // Verify cluster connectivity + GinkgoWriter.Println("Verifying cluster connectivity...") + _, err = kubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{Limit: 1}) + Expect(err).NotTo(HaveOccurred(), "Failed to connect to Kubernetes cluster") + GinkgoWriter.Println("Cluster connectivity verified") +}) + +var _ = AfterSuite(func() { + if cancel != nil { + cancel() + } + GinkgoWriter.Println("E2E Suite cleanup complete") +}) diff --git a/test/e2e/flags/auto_reload_all_test.go b/test/e2e/flags/auto_reload_all_test.go new file mode 100644 index 0000000..54f30d4 --- /dev/null +++ b/test/e2e/flags/auto_reload_all_test.go @@ -0,0 +1,106 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Auto Reload All Flag Tests", func() { + var ( + deploymentName string + configMapName string + autoNamespace string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + autoNamespace = "auto-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, autoNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, autoNamespace, configMapName) + }) + + Context("with autoReloadAll=true flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, autoNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.autoReloadAll": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, autoNamespace) + }) + + It("should reload workloads without any annotations when autoReloadAll is true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment WITHOUT any Reloader annotations") + _, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, autoNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded 
(autoReloadAll=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, autoNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment without annotations should reload when autoReloadAll=true") + }) + + It("should respect auto=false annotation even when autoReloadAll is true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto=false annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoFalseAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, autoNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (auto=false overrides autoReloadAll)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, autoNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT reload even with autoReloadAll=true") + }) + }) +}) diff --git a/test/e2e/flags/flags_suite_test.go b/test/e2e/flags/flags_suite_test.go new file mode 100644 index 0000000..f70adaf --- /dev/null +++ b/test/e2e/flags/flags_suite_test.go @@ -0,0 +1,71 @@ +package flags + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" + "k8s.io/client-go/kubernetes" +) + +var ( + kubeClient kubernetes.Interface + testNamespace string + ctx context.Context + testEnv *utils.TestEnvironment +) + +func TestFlags(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Flag-Based E2E Suite") +} + +var _ = BeforeSuite(func() { + var err error + ctx = context.Background() + + // Setup test environment (but don't deploy Reloader - tests do that with specific flags) + testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-flags") + Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment") + + // Export for use in tests + kubeClient = testEnv.KubeClient + testNamespace = testEnv.Namespace + + // Note: Unlike other suites, we don't deploy Reloader here. + // Each test deploys with specific flag configurations. +}) + +var _ = AfterSuite(func() { + if testEnv != nil { + err := testEnv.Cleanup() + Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment") + } + + GinkgoWriter.Println("Flags E2E Suite cleanup complete") +}) + +// deployReloaderWithFlags deploys Reloader with the specified Helm value overrides. +// This is a convenience function for tests that need to deploy with specific flags. +func deployReloaderWithFlags(values map[string]string) error { + // Always include annotations strategy + if values == nil { + values = make(map[string]string) + } + if _, ok := values["reloader.reloadStrategy"]; !ok { + values["reloader.reloadStrategy"] = "annotations" + } + return testEnv.DeployAndWait(values) +} + +// undeployReloader removes the Reloader installation. +func undeployReloader() error { + return utils.UndeployReloader(testNamespace, testEnv.ReleaseName) +} + +// waitForReloaderReady waits for the Reloader deployment to be ready. 
+func waitForReloaderReady() error { + return testEnv.WaitForReloader() +} diff --git a/test/e2e/flags/ignore_resources_test.go b/test/e2e/flags/ignore_resources_test.go new file mode 100644 index 0000000..5c17d82 --- /dev/null +++ b/test/e2e/flags/ignore_resources_test.go @@ -0,0 +1,193 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Ignore Resources Flag Tests", func() { + var ( + deploymentName string + configMapName string + secretName string + ignoreNS string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + secretName = utils.RandName("secret") + ignoreNS = "ignore-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName) + _ = utils.DeleteSecret(ctx, kubeClient, ignoreNS, secretName) + }) + + Context("with ignoreSecrets=true flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with ignoreSecrets flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreSecrets": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload when Secret changes with ignoreSecrets=true", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, 
deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignoreSecrets=true)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreSecrets=true") + }) + + It("should still reload when ConfigMap changes with ignoreSecrets=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (ConfigMap should still work)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, 
deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "ConfigMap changes should still trigger reload with ignoreSecrets=true") + }) + }) + + Context("with ignoreConfigMaps=true flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with ignoreConfigMaps flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreConfigMaps": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload when ConfigMap changes with ignoreConfigMaps=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignoreConfigMaps=true)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + 
Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreConfigMaps=true") + }) + + It("should still reload when Secret changes with ignoreConfigMaps=true", func() { + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithSecretEnvFrom(secretName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the Secret") + err = utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName, + map[string]string{"password": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (Secret should still work)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Secret changes should still trigger reload with ignoreConfigMaps=true") + }) + }) +}) diff --git a/test/e2e/flags/ignored_workloads_test.go b/test/e2e/flags/ignored_workloads_test.go new file mode 100644 index 0000000..c2910c3 --- /dev/null +++ b/test/e2e/flags/ignored_workloads_test.go @@ -0,0 +1,159 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Ignored Workloads Flag Tests", func() { + var ( + cronJobName string + configMapName string + ignoreNS string + ) + + BeforeEach(func() { + cronJobName = utils.RandName("cj") + configMapName = utils.RandName("cm") + ignoreNS = "ignore-wl-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteCronJob(ctx, kubeClient, ignoreNS, cronJobName) + _ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName) + }) + + Context("with ignoreCronJobs=true flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with ignoreCronJobs flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreCronJobs": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload CronJobs when ignoreCronJobs=true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with auto annotation referencing the ConfigMap") + _, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName, + utils.WithCronJobConfigMapEnvFrom(configMapName), + utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying CronJob was NOT reloaded (ignoreCronJobs=true)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForCronJobReloaded(ctx, kubeClient, 
ignoreNS, cronJobName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true") + }) + + It("should still reload Deployments when ignoreCronJobs=true", func() { + deploymentName := utils.RandName("deploy") + + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation referencing the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName) + }() + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "updated-deploy"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (Deployment should still work)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should still reload with ignoreCronJobs=true") + }) + }) + + Context("with both ignoreCronJobs=true and ignoreJobs=true flags", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, ignoreNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with both ignore flags + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreCronJobs": 
"true", + "reloader.ignoreJobs": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS) + }) + + It("should NOT reload CronJobs when both job flags are true", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a CronJob with auto annotation") + _, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName, + utils.WithCronJobConfigMapEnvFrom(configMapName), + utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying CronJob was NOT reloaded") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForCronJobReloaded(ctx, kubeClient, ignoreNS, cronJobName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true and ignoreJobs=true") + }) + }) +}) diff --git a/test/e2e/flags/namespace_ignore_test.go b/test/e2e/flags/namespace_ignore_test.go new file mode 100644 index 0000000..31767f9 --- /dev/null +++ b/test/e2e/flags/namespace_ignore_test.go @@ -0,0 +1,114 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Namespace Ignore Flag Tests", func() { + var ( + deploymentName string + configMapName string + ignoredNamespace string + watchedNamespace string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + ignoredNamespace = "ignored-" + utils.RandName("ns") + watchedNamespace = "watched-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, ignoredNamespace, deploymentName) + _ = utils.DeleteDeployment(ctx, kubeClient, watchedNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, ignoredNamespace, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, watchedNamespace, configMapName) + }) + + Context("with ignoreNamespaces flag", func() { + BeforeEach(func() { + err := utils.CreateNamespace(ctx, kubeClient, ignoredNamespace) + Expect(err).NotTo(HaveOccurred()) + err = utils.CreateNamespace(ctx, kubeClient, watchedNamespace) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.ignoreNamespaces": ignoredNamespace, + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, ignoredNamespace) + _ = utils.DeleteNamespace(ctx, kubeClient, watchedNamespace) + }) + + It("should NOT reload in ignored namespace", func() { + By("Creating a ConfigMap in the ignored namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in the ignored namespace") + _, err = utils.CreateDeployment(ctx, kubeClient, ignoredNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + 
utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoredNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (ignored namespace)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoredNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment in ignored namespace should NOT be reloaded") + }) + + It("should reload in watched (non-ignored) namespace", func() { + By("Creating a ConfigMap in the watched namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, watchedNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in the watched namespace") + _, err = utils.CreateDeployment(ctx, kubeClient, watchedNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, watchedNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, watchedNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, watchedNamespace, 
deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in non-ignored namespace should be reloaded") + }) + }) +}) diff --git a/test/e2e/flags/namespace_selector_test.go b/test/e2e/flags/namespace_selector_test.go new file mode 100644 index 0000000..82781f3 --- /dev/null +++ b/test/e2e/flags/namespace_selector_test.go @@ -0,0 +1,116 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Namespace Selector Flag Tests", func() { + var ( + deploymentName string + configMapName string + matchingNS string + nonMatchingNS string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + matchingNS = "match-" + utils.RandName("ns") + nonMatchingNS = "nomatch-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, matchingNS, deploymentName) + _ = utils.DeleteDeployment(ctx, kubeClient, nonMatchingNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, matchingNS, configMapName) + _ = utils.DeleteConfigMap(ctx, kubeClient, nonMatchingNS, configMapName) + }) + + Context("with namespaceSelector flag", func() { + BeforeEach(func() { + err := utils.CreateNamespaceWithLabels(ctx, kubeClient, matchingNS, + map[string]string{"env": "test"}) + Expect(err).NotTo(HaveOccurred()) + + err = utils.CreateNamespace(ctx, kubeClient, nonMatchingNS) + Expect(err).NotTo(HaveOccurred()) + + err = deployReloaderWithFlags(map[string]string{ + "reloader.namespaceSelector": "env=test", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, matchingNS) + _ = utils.DeleteNamespace(ctx, kubeClient, nonMatchingNS) + }) + + It("should 
reload workloads in matching namespaces", func() { + By("Creating a ConfigMap in matching namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, matchingNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in matching namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, matchingNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, matchingNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, matchingNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, matchingNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in matching namespace should be reloaded") + }) + + It("should NOT reload workloads in non-matching namespaces", func() { + By("Creating a ConfigMap in non-matching namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in non-matching namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, nonMatchingNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, nonMatchingNS, deploymentName, 
utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (non-matching namespace)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, nonMatchingNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment in non-matching namespace should NOT be reloaded") + }) + }) +}) diff --git a/test/e2e/flags/reload_on_create_test.go b/test/e2e/flags/reload_on_create_test.go new file mode 100644 index 0000000..c27a727 --- /dev/null +++ b/test/e2e/flags/reload_on_create_test.go @@ -0,0 +1,143 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reload On Create Flag Tests", func() { + var ( + deploymentName string + configMapName string + createNamespace string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + createNamespace = "create-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, createNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, createNamespace, configMapName) + }) + + Context("with reloadOnCreate=true flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, createNamespace) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with reloadOnCreate flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.reloadOnCreate": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = 
undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, createNamespace) + }) + + It("should reload when a new ConfigMap is created", func() { + By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the ConfigMap that the Deployment references") + _, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnCreate=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is created") + }) + + It("should reload when a new Secret is created", func() { + secretName := utils.RandName("secret") + defer func() { _ = utils.DeleteSecret(ctx, kubeClient, createNamespace, secretName) }() + + By("Creating a Deployment with annotation for a Secret that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the Secret that the Deployment references") + _, err = utils.CreateSecretFromStrings(ctx, 
kubeClient, createNamespace, secretName, + map[string]string{"password": "secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnCreate=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is created") + }) + }) + + Context("with reloadOnCreate=false (default)", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, createNamespace) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader without reloadOnCreate flag (default is false) + err = deployReloaderWithFlags(map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, createNamespace) + }) + + It("should NOT reload when a new ConfigMap is created (default behavior)", func() { + By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet") + _, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the ConfigMap that the Deployment references") + _, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (reloadOnCreate=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := 
utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload on create when reloadOnCreate=false") + }) + }) +}) diff --git a/test/e2e/flags/reload_on_delete_test.go b/test/e2e/flags/reload_on_delete_test.go new file mode 100644 index 0000000..3e822b0 --- /dev/null +++ b/test/e2e/flags/reload_on_delete_test.go @@ -0,0 +1,154 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Reload On Delete Flag Tests", func() { + var ( + deploymentName string + configMapName string + deleteNamespace string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + deleteNamespace = "delete-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, deleteNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + }) + + Context("with reloadOnDelete=true flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with reloadOnDelete flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.reloadOnDelete": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace) + }) + + It("should reload when a referenced ConfigMap is deleted", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a 
Deployment with annotation for the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the ConfigMap") + err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnDelete=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is deleted") + }) + + It("should reload when a referenced Secret is deleted", func() { + secretName := utils.RandName("secret") + + By("Creating a Secret") + _, err := utils.CreateSecretFromStrings(ctx, kubeClient, deleteNamespace, secretName, + map[string]string{"password": "secret"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation for the Secret") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the Secret") + err = utils.DeleteSecret(ctx, kubeClient, deleteNamespace, secretName) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (reloadOnDelete=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, 
deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is deleted") + }) + }) + + Context("with reloadOnDelete=false (default)", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader without reloadOnDelete flag (default is false) + err = deployReloaderWithFlags(map[string]string{}) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace) + }) + + It("should NOT reload when a referenced ConfigMap is deleted (default behavior)", func() { + By("Creating a ConfigMap") + _, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName, + map[string]string{"key": "value"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with annotation for the ConfigMap") + _, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName, + utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Deleting the ConfigMap") + err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (reloadOnDelete=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, deleteNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + 
Expect(reloaded).To(BeFalse(), "Deployment should NOT reload on delete when reloadOnDelete=false") + }) + }) +}) diff --git a/test/e2e/flags/resource_selector_test.go b/test/e2e/flags/resource_selector_test.go new file mode 100644 index 0000000..6282c40 --- /dev/null +++ b/test/e2e/flags/resource_selector_test.go @@ -0,0 +1,114 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Resource Label Selector Flag Tests", func() { + var ( + deploymentName string + matchingCM string + nonMatchingCM string + resourceNS string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + matchingCM = utils.RandName("match-cm") + nonMatchingCM = utils.RandName("nomatch-cm") + resourceNS = "resource-" + utils.RandName("ns") + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, resourceNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, matchingCM) + _ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM) + }) + + Context("with resourceLabelSelector flag", func() { + BeforeEach(func() { + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, resourceNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with resourceLabelSelector flag + err = deployReloaderWithFlags(map[string]string{ + "reloader.resourceLabelSelector": "reload=true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, resourceNS) + }) + + It("should reload when labeled ConfigMap changes", func() { + By("Creating a ConfigMap with matching label") + _, err := utils.CreateConfigMapWithLabels(ctx, kubeClient, resourceNS, matchingCM, + map[string]string{"key": "initial"}, + map[string]string{"reload": "true"}, + nil) // no annotations + 
Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName, + utils.WithConfigMapEnvFrom(matchingCM), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, resourceNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the labeled ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, matchingCM, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, resourceNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when labeled ConfigMap changes") + }) + + It("should NOT reload when unlabeled ConfigMap changes", func() { + By("Creating a ConfigMap WITHOUT matching label") + _, err := utils.CreateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName, + utils.WithConfigMapEnvFrom(nonMatchingCM), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, resourceNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the unlabeled ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded 
(unlabeled ConfigMap)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, resourceNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when unlabeled ConfigMap changes") + }) + }) +}) diff --git a/test/e2e/flags/watch_globally_test.go b/test/e2e/flags/watch_globally_test.go new file mode 100644 index 0000000..c8cbf94 --- /dev/null +++ b/test/e2e/flags/watch_globally_test.go @@ -0,0 +1,170 @@ +package flags + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/stakater/Reloader/test/e2e/utils" +) + +var _ = Describe("Watch Globally Flag Tests", func() { + var ( + deploymentName string + configMapName string + otherNS string + ) + + BeforeEach(func() { + deploymentName = utils.RandName("deploy") + configMapName = utils.RandName("cm") + otherNS = "other-" + utils.RandName("ns") + }) + + AfterEach(func() { + // Clean up resources in both namespaces + _ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName) + _ = utils.DeleteDeployment(ctx, kubeClient, otherNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, otherNS, configMapName) + }) + + Context("with watchGlobally=false flag", func() { + BeforeEach(func() { + // Create the other namespace for testing cross-namespace behavior + err := utils.CreateNamespace(ctx, kubeClient, otherNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with watchGlobally=false + // This makes Reloader only watch resources in its own namespace (testNamespace) + err = deployReloaderWithFlags(map[string]string{ + "reloader.watchGlobally": "false", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = undeployReloader() + _ 
= utils.DeleteNamespace(ctx, kubeClient, otherNS) + }) + + It("should reload workloads in Reloader's namespace when watchGlobally=false", func() { + By("Creating a ConfigMap in Reloader's namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in Reloader's namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (same namespace should work)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in Reloader's namespace should reload with watchGlobally=false") + }) + + It("should NOT reload workloads in other namespaces when watchGlobally=false", func() { + By("Creating a ConfigMap in another namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, otherNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in another namespace with auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, otherNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + 
Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, otherNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap in the other namespace") + err = utils.UpdateConfigMap(ctx, kubeClient, otherNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Deployment was NOT reloaded (different namespace with watchGlobally=false)") + time.Sleep(utils.NegativeTestWait) + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, otherNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ShortTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeFalse(), "Deployment in other namespace should NOT reload with watchGlobally=false") + }) + }) + + Context("with watchGlobally=true flag (default)", func() { + var globalNS string + + BeforeEach(func() { + globalNS = "global-" + utils.RandName("ns") + + // Create test namespace + err := utils.CreateNamespace(ctx, kubeClient, globalNS) + Expect(err).NotTo(HaveOccurred()) + + // Deploy Reloader with watchGlobally=true (default) + err = deployReloaderWithFlags(map[string]string{ + "reloader.watchGlobally": "true", + }) + Expect(err).NotTo(HaveOccurred()) + + err = waitForReloaderReady() + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + _ = utils.DeleteDeployment(ctx, kubeClient, globalNS, deploymentName) + _ = utils.DeleteConfigMap(ctx, kubeClient, globalNS, configMapName) + _ = undeployReloader() + _ = utils.DeleteNamespace(ctx, kubeClient, globalNS) + }) + + It("should reload workloads in any namespace when watchGlobally=true", func() { + By("Creating a ConfigMap in a different namespace") + _, err := utils.CreateConfigMap(ctx, kubeClient, globalNS, configMapName, + map[string]string{"key": "initial"}, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a Deployment in a different namespace with 
auto annotation") + _, err = utils.CreateDeployment(ctx, kubeClient, globalNS, deploymentName, + utils.WithConfigMapEnvFrom(configMapName), + utils.WithAnnotations(utils.BuildAutoTrueAnnotation()), + ) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be ready") + err = utils.WaitForDeploymentReady(ctx, kubeClient, globalNS, deploymentName, utils.DeploymentReady) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the ConfigMap") + err = utils.UpdateConfigMap(ctx, kubeClient, globalNS, configMapName, + map[string]string{"key": "updated"}) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for Deployment to be reloaded (watchGlobally=true)") + reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, globalNS, deploymentName, + utils.AnnotationLastReloadedFrom, utils.ReloadTimeout) + Expect(err).NotTo(HaveOccurred()) + Expect(reloaded).To(BeTrue(), "Deployment in any namespace should reload with watchGlobally=true") + }) + }) +}) diff --git a/test/e2e/utils/annotations.go b/test/e2e/utils/annotations.go new file mode 100644 index 0000000..1be0415 --- /dev/null +++ b/test/e2e/utils/annotations.go @@ -0,0 +1,207 @@ +package utils + +// Annotation key constants used by Reloader. +// These follow the pattern: {scope}.reloader.stakater.com/{action} +// where scope can be empty (all resources), "configmap", "secret", "deployment", etc. +const ( + // ============================================================ + // Core reload annotations + // ============================================================ + + // AnnotationLastReloadedFrom is set by Reloader on workloads to track the last resource + // that triggered a reload. Format: "{namespace}/{resource-type}/{resource-name}" + AnnotationLastReloadedFrom = "reloader.stakater.com/last-reloaded-from" + + // AnnotationConfigMapReload triggers reload when specified ConfigMap(s) change. 
+ // Value: comma-separated list of ConfigMap names, e.g., "config1,config2" + AnnotationConfigMapReload = "configmap.reloader.stakater.com/reload" + + // AnnotationSecretReload triggers reload when specified Secret(s) change. + // Value: comma-separated list of Secret names, e.g., "secret1,secret2" + AnnotationSecretReload = "secret.reloader.stakater.com/reload" + + // ============================================================ + // Auto-reload annotations + // ============================================================ + + // AnnotationAuto enables auto-reload for all referenced ConfigMaps and Secrets. + // Value: "true" or "false" + AnnotationAuto = "reloader.stakater.com/auto" + + // AnnotationConfigMapAuto enables auto-reload for all referenced ConfigMaps only. + // Value: "true" or "false" + AnnotationConfigMapAuto = "configmap.reloader.stakater.com/auto" + + // AnnotationSecretAuto enables auto-reload for all referenced Secrets only. + // Value: "true" or "false" + AnnotationSecretAuto = "secret.reloader.stakater.com/auto" + + // ============================================================ + // Exclude annotations (used with auto=true to exclude specific resources) + // ============================================================ + + // AnnotationConfigMapExclude excludes specified ConfigMaps from auto-reload. + // Value: comma-separated list of ConfigMap names + AnnotationConfigMapExclude = "configmaps.exclude.reloader.stakater.com/reload" + + // AnnotationSecretExclude excludes specified Secrets from auto-reload. + // Value: comma-separated list of Secret names + AnnotationSecretExclude = "secrets.exclude.reloader.stakater.com/reload" + + // ============================================================ + // Search annotations (for regex matching) + // ============================================================ + + // AnnotationSearch enables regex search mode for ConfigMap/Secret names. 
+ // Value: "true" + // Used with reload annotation where value is a regex pattern. + AnnotationSearch = "reloader.stakater.com/search" + + // AnnotationMatch is placed on a ConfigMap/Secret to mark it as eligible for workloads that use search mode. + // Value: "true" + AnnotationMatch = "reloader.stakater.com/match" + + // ============================================================ + // Resource-level annotations (placed on ConfigMap/Secret) + // ============================================================ + + // AnnotationIgnore prevents Reloader from triggering reloads for this resource. + // Place this on a ConfigMap or Secret to exclude it from reload triggers. + // Value: "true" + AnnotationIgnore = "reloader.stakater.com/ignore" + + // ============================================================ + // Pause/period annotations + // ============================================================ + + // AnnotationDeploymentPausePeriod sets a pause period before triggering reload. + // Value: duration string, e.g., "10s", "1m" + AnnotationDeploymentPausePeriod = "deployment.reloader.stakater.com/pause-period" + + // AnnotationDeploymentPausedAt is set by Reloader when a workload is paused. + // Value: RFC3339 timestamp + AnnotationDeploymentPausedAt = "deployment.reloader.stakater.com/paused-at" + + // ============================================================ + // Argo Rollouts specific annotations + // ============================================================ + + // AnnotationRolloutStrategy specifies the strategy for Argo Rollouts. + // Value: "restart" (sets spec.restartAt) + AnnotationRolloutStrategy = "reloader.stakater.com/rollout-strategy" +) + +// Annotation values. +const ( + // AnnotationValueTrue is the string "true" for annotation values. + AnnotationValueTrue = "true" + + // AnnotationValueFalse is the string "false" for annotation values. + AnnotationValueFalse = "false" + + // AnnotationValueRestart is the "restart" strategy value for Argo Rollouts. 
+ AnnotationValueRestart = "restart" +) + +// BuildConfigMapReloadAnnotation creates an annotation map for ConfigMap reload. +func BuildConfigMapReloadAnnotation(configMapNames ...string) map[string]string { + return map[string]string{ + AnnotationConfigMapReload: joinNames(configMapNames), + } +} + +// BuildSecretReloadAnnotation creates an annotation map for Secret reload. +func BuildSecretReloadAnnotation(secretNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretReload: joinNames(secretNames), + } +} + +// BuildAutoTrueAnnotation creates an annotation map with auto=true. +func BuildAutoTrueAnnotation() map[string]string { + return map[string]string{ + AnnotationAuto: AnnotationValueTrue, + } +} + +// BuildAutoFalseAnnotation creates an annotation map with auto=false. +func BuildAutoFalseAnnotation() map[string]string { + return map[string]string{ + AnnotationAuto: AnnotationValueFalse, + } +} + +// BuildConfigMapAutoAnnotation creates an annotation map with configmap auto=true. +func BuildConfigMapAutoAnnotation() map[string]string { + return map[string]string{ + AnnotationConfigMapAuto: AnnotationValueTrue, + } +} + +// BuildSecretAutoAnnotation creates an annotation map with secret auto=true. +func BuildSecretAutoAnnotation() map[string]string { + return map[string]string{ + AnnotationSecretAuto: AnnotationValueTrue, + } +} + +// BuildSearchAnnotation creates an annotation map to enable search mode. +func BuildSearchAnnotation() map[string]string { + return map[string]string{ + AnnotationSearch: AnnotationValueTrue, + } +} + +// BuildMatchAnnotation creates an annotation map to enable match mode. +func BuildMatchAnnotation() map[string]string { + return map[string]string{ + AnnotationMatch: AnnotationValueTrue, + } +} + +// BuildIgnoreAnnotation creates an annotation map to ignore a resource. 
+func BuildIgnoreAnnotation() map[string]string { + return map[string]string{ + AnnotationIgnore: AnnotationValueTrue, + } +} + +// BuildRolloutRestartStrategyAnnotation creates an annotation for Argo Rollout restart strategy. +func BuildRolloutRestartStrategyAnnotation() map[string]string { + return map[string]string{ + AnnotationRolloutStrategy: AnnotationValueRestart, + } +} + +// BuildConfigMapExcludeAnnotation creates an annotation to exclude ConfigMaps from auto-reload. +func BuildConfigMapExcludeAnnotation(configMapNames ...string) map[string]string { + return map[string]string{ + AnnotationConfigMapExclude: joinNames(configMapNames), + } +} + +// BuildSecretExcludeAnnotation creates an annotation to exclude Secrets from auto-reload. +func BuildSecretExcludeAnnotation(secretNames ...string) map[string]string { + return map[string]string{ + AnnotationSecretExclude: joinNames(secretNames), + } +} + +// BuildPausePeriodAnnotation creates an annotation for deployment pause period. +func BuildPausePeriodAnnotation(duration string) map[string]string { + return map[string]string{ + AnnotationDeploymentPausePeriod: duration, + } +} + +// joinNames joins names with comma separator. 
+func joinNames(names []string) string { + if len(names) == 0 { + return "" + } + result := names[0] + for i := 1; i < len(names); i++ { + result += "," + names[i] + } + return result +} diff --git a/test/e2e/utils/annotations_test.go b/test/e2e/utils/annotations_test.go new file mode 100644 index 0000000..4689d10 --- /dev/null +++ b/test/e2e/utils/annotations_test.go @@ -0,0 +1,306 @@ +package utils + +import ( + "testing" +) + +func TestBuildConfigMapReloadAnnotation(t *testing.T) { + tests := []struct { + name string + configMaps []string + expected map[string]string + }{ + { + name: "single ConfigMap", + configMaps: []string{"my-config"}, + expected: map[string]string{ + AnnotationConfigMapReload: "my-config", + }, + }, + { + name: "multiple ConfigMaps", + configMaps: []string{"config1", "config2", "config3"}, + expected: map[string]string{ + AnnotationConfigMapReload: "config1,config2,config3", + }, + }, + { + name: "empty list", + configMaps: []string{}, + expected: map[string]string{ + AnnotationConfigMapReload: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildConfigMapReloadAnnotation(tt.configMaps...) 
+ if len(result) != len(tt.expected) { + t.Errorf("BuildConfigMapReloadAnnotation() returned %d entries, want %d", len(result), len(tt.expected)) + } + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("BuildConfigMapReloadAnnotation()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestBuildSecretReloadAnnotation(t *testing.T) { + tests := []struct { + name string + secrets []string + expected map[string]string + }{ + { + name: "single Secret", + secrets: []string{"my-secret"}, + expected: map[string]string{ + AnnotationSecretReload: "my-secret", + }, + }, + { + name: "multiple Secrets", + secrets: []string{"secret1", "secret2"}, + expected: map[string]string{ + AnnotationSecretReload: "secret1,secret2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildSecretReloadAnnotation(tt.secrets...) + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("BuildSecretReloadAnnotation()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestBuildAutoAnnotations(t *testing.T) { + t.Run("BuildAutoTrueAnnotation", func(t *testing.T) { + result := BuildAutoTrueAnnotation() + if result[AnnotationAuto] != AnnotationValueTrue { + t.Errorf("BuildAutoTrueAnnotation()[%q] = %q, want %q", + AnnotationAuto, result[AnnotationAuto], AnnotationValueTrue) + } + }) + + t.Run("BuildAutoFalseAnnotation", func(t *testing.T) { + result := BuildAutoFalseAnnotation() + if result[AnnotationAuto] != AnnotationValueFalse { + t.Errorf("BuildAutoFalseAnnotation()[%q] = %q, want %q", + AnnotationAuto, result[AnnotationAuto], AnnotationValueFalse) + } + }) + + t.Run("BuildConfigMapAutoAnnotation", func(t *testing.T) { + result := BuildConfigMapAutoAnnotation() + if result[AnnotationConfigMapAuto] != AnnotationValueTrue { + t.Errorf("BuildConfigMapAutoAnnotation()[%q] = %q, want %q", + AnnotationConfigMapAuto, result[AnnotationConfigMapAuto], AnnotationValueTrue) + } + }) + + 
t.Run("BuildSecretAutoAnnotation", func(t *testing.T) { + result := BuildSecretAutoAnnotation() + if result[AnnotationSecretAuto] != AnnotationValueTrue { + t.Errorf("BuildSecretAutoAnnotation()[%q] = %q, want %q", + AnnotationSecretAuto, result[AnnotationSecretAuto], AnnotationValueTrue) + } + }) +} + +func TestBuildSearchMatchAnnotations(t *testing.T) { + t.Run("BuildSearchAnnotation", func(t *testing.T) { + result := BuildSearchAnnotation() + if result[AnnotationSearch] != AnnotationValueTrue { + t.Errorf("BuildSearchAnnotation()[%q] = %q, want %q", + AnnotationSearch, result[AnnotationSearch], AnnotationValueTrue) + } + }) + + t.Run("BuildMatchAnnotation", func(t *testing.T) { + result := BuildMatchAnnotation() + if result[AnnotationMatch] != AnnotationValueTrue { + t.Errorf("BuildMatchAnnotation()[%q] = %q, want %q", + AnnotationMatch, result[AnnotationMatch], AnnotationValueTrue) + } + }) +} + +func TestBuildIgnoreAnnotation(t *testing.T) { + result := BuildIgnoreAnnotation() + if result[AnnotationIgnore] != AnnotationValueTrue { + t.Errorf("BuildIgnoreAnnotation()[%q] = %q, want %q", + AnnotationIgnore, result[AnnotationIgnore], AnnotationValueTrue) + } +} + +func TestBuildRolloutRestartStrategyAnnotation(t *testing.T) { + result := BuildRolloutRestartStrategyAnnotation() + if result[AnnotationRolloutStrategy] != AnnotationValueRestart { + t.Errorf("BuildRolloutRestartStrategyAnnotation()[%q] = %q, want %q", + AnnotationRolloutStrategy, result[AnnotationRolloutStrategy], AnnotationValueRestart) + } +} + +func TestBuildExcludeAnnotations(t *testing.T) { + t.Run("BuildConfigMapExcludeAnnotation single", func(t *testing.T) { + result := BuildConfigMapExcludeAnnotation("excluded-cm") + if result[AnnotationConfigMapExclude] != "excluded-cm" { + t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q", + AnnotationConfigMapExclude, result[AnnotationConfigMapExclude], "excluded-cm") + } + }) + + t.Run("BuildConfigMapExcludeAnnotation multiple", func(t 
*testing.T) { + result := BuildConfigMapExcludeAnnotation("cm1", "cm2", "cm3") + expected := "cm1,cm2,cm3" + if result[AnnotationConfigMapExclude] != expected { + t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q", + AnnotationConfigMapExclude, result[AnnotationConfigMapExclude], expected) + } + }) + + t.Run("BuildSecretExcludeAnnotation single", func(t *testing.T) { + result := BuildSecretExcludeAnnotation("excluded-secret") + if result[AnnotationSecretExclude] != "excluded-secret" { + t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q", + AnnotationSecretExclude, result[AnnotationSecretExclude], "excluded-secret") + } + }) + + t.Run("BuildSecretExcludeAnnotation multiple", func(t *testing.T) { + result := BuildSecretExcludeAnnotation("s1", "s2") + expected := "s1,s2" + if result[AnnotationSecretExclude] != expected { + t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q", + AnnotationSecretExclude, result[AnnotationSecretExclude], expected) + } + }) +} + +func TestBuildPausePeriodAnnotation(t *testing.T) { + tests := []struct { + name string + duration string + expected string + }{ + { + name: "10 seconds", + duration: "10s", + expected: "10s", + }, + { + name: "1 minute", + duration: "1m", + expected: "1m", + }, + { + name: "30 minutes", + duration: "30m", + expected: "30m", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildPausePeriodAnnotation(tt.duration) + if result[AnnotationDeploymentPausePeriod] != tt.expected { + t.Errorf("BuildPausePeriodAnnotation(%q)[%q] = %q, want %q", + tt.duration, AnnotationDeploymentPausePeriod, + result[AnnotationDeploymentPausePeriod], tt.expected) + } + }) + } +} + +func TestJoinNames(t *testing.T) { + tests := []struct { + name string + names []string + expected string + }{ + { + name: "empty slice", + names: []string{}, + expected: "", + }, + { + name: "single name", + names: []string{"one"}, + expected: "one", + }, + { + name: "two names", + names: 
[]string{"one", "two"}, + expected: "one,two", + }, + { + name: "three names", + names: []string{"alpha", "beta", "gamma"}, + expected: "alpha,beta,gamma", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := joinNames(tt.names) + if result != tt.expected { + t.Errorf("joinNames(%v) = %q, want %q", tt.names, result, tt.expected) + } + }) + } +} + +func TestAnnotationConstants(t *testing.T) { + // Verify annotation constants have expected values + // This ensures we don't accidentally change the annotation keys + tests := []struct { + name string + constant string + expected string + }{ + {"AnnotationLastReloadedFrom", AnnotationLastReloadedFrom, "reloader.stakater.com/last-reloaded-from"}, + {"AnnotationConfigMapReload", AnnotationConfigMapReload, "configmap.reloader.stakater.com/reload"}, + {"AnnotationSecretReload", AnnotationSecretReload, "secret.reloader.stakater.com/reload"}, + {"AnnotationAuto", AnnotationAuto, "reloader.stakater.com/auto"}, + {"AnnotationConfigMapAuto", AnnotationConfigMapAuto, "configmap.reloader.stakater.com/auto"}, + {"AnnotationSecretAuto", AnnotationSecretAuto, "secret.reloader.stakater.com/auto"}, + {"AnnotationConfigMapExclude", AnnotationConfigMapExclude, "configmaps.exclude.reloader.stakater.com/reload"}, + {"AnnotationSecretExclude", AnnotationSecretExclude, "secrets.exclude.reloader.stakater.com/reload"}, + {"AnnotationSearch", AnnotationSearch, "reloader.stakater.com/search"}, + {"AnnotationMatch", AnnotationMatch, "reloader.stakater.com/match"}, + {"AnnotationIgnore", AnnotationIgnore, "reloader.stakater.com/ignore"}, + {"AnnotationDeploymentPausePeriod", AnnotationDeploymentPausePeriod, "deployment.reloader.stakater.com/pause-period"}, + {"AnnotationDeploymentPausedAt", AnnotationDeploymentPausedAt, "deployment.reloader.stakater.com/paused-at"}, + {"AnnotationRolloutStrategy", AnnotationRolloutStrategy, "reloader.stakater.com/rollout-strategy"}, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + if tt.constant != tt.expected { + t.Errorf("%s = %q, want %q", tt.name, tt.constant, tt.expected) + } + }) + } +} + +func TestAnnotationValues(t *testing.T) { + // Verify annotation value constants + if AnnotationValueTrue != "true" { + t.Errorf("AnnotationValueTrue = %q, want \"true\"", AnnotationValueTrue) + } + if AnnotationValueFalse != "false" { + t.Errorf("AnnotationValueFalse = %q, want \"false\"", AnnotationValueFalse) + } + if AnnotationValueRestart != "restart" { + t.Errorf("AnnotationValueRestart = %q, want \"restart\"", AnnotationValueRestart) + } +} diff --git a/test/e2e/utils/argo.go b/test/e2e/utils/argo.go new file mode 100644 index 0000000..6df5cf3 --- /dev/null +++ b/test/e2e/utils/argo.go @@ -0,0 +1,308 @@ +package utils + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" +) + +// ArgoRolloutGVR returns the GroupVersionResource for Argo Rollouts. +var ArgoRolloutGVR = schema.GroupVersionResource{ + Group: "argoproj.io", + Version: "v1alpha1", + Resource: "rollouts", +} + +// RolloutOption is a functional option for configuring an Argo Rollout. +type RolloutOption func(*unstructured.Unstructured) + +// IsArgoRolloutsInstalled checks if Argo Rollouts CRD is installed in the cluster. +func IsArgoRolloutsInstalled(ctx context.Context, dynamicClient dynamic.Interface) bool { + // Try to list rollouts - if CRD exists, this will succeed (possibly with empty list) + _, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace("default").List(ctx, metav1.ListOptions{Limit: 1}) + return err == nil +} + +// CreateArgoRollout creates an Argo Rollout with the given options. 
+func CreateArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, opts ...RolloutOption) error { + rollout := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "argoproj.io/v1alpha1", + "kind": "Rollout", + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "app": name, + }, + }, + "template": map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "app": name, + }, + }, + "spec": map[string]interface{}{ + "containers": []interface{}{ + map[string]interface{}{ + "name": "app", + "image": "busybox:1.36", + "command": []interface{}{"sh", "-c", "sleep 3600"}, + }, + }, + }, + }, + "strategy": map[string]interface{}{ + "canary": map[string]interface{}{ + "steps": []interface{}{ + map[string]interface{}{ + "setWeight": int64(100), + }, + }, + }, + }, + }, + }, + } + + // Apply options + for _, opt := range opts { + opt(rollout) + } + + _, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Create(ctx, rollout, metav1.CreateOptions{}) + return err +} + +// DeleteArgoRollout deletes an Argo Rollout. +func DeleteArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) error { + err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}) + return err +} + +// GetArgoRollout retrieves an Argo Rollout. +func GetArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (*unstructured.Unstructured, error) { + return dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +// WithRolloutConfigMapEnvFrom adds a ConfigMap envFrom to the Rollout. 
+func WithRolloutConfigMapEnvFrom(configMapName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + envFrom, _, _ := unstructured.NestedSlice(container, "envFrom") + envFrom = append(envFrom, map[string]interface{}{ + "configMapRef": map[string]interface{}{ + "name": configMapName, + }, + }) + container["envFrom"] = envFrom + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutSecretEnvFrom adds a Secret envFrom to the Rollout. +func WithRolloutSecretEnvFrom(secretName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + envFrom, _, _ := unstructured.NestedSlice(container, "envFrom") + envFrom = append(envFrom, map[string]interface{}{ + "secretRef": map[string]interface{}{ + "name": secretName, + }, + }) + container["envFrom"] = envFrom + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutConfigMapVolume adds a ConfigMap volume to the Rollout. 
+func WithRolloutConfigMapVolume(configMapName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + // Add volume + volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes") + volumes = append(volumes, map[string]interface{}{ + "name": configMapName + "-volume", + "configMap": map[string]interface{}{ + "name": configMapName, + }, + }) + _ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes") + + // Add volumeMount + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts") + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": configMapName + "-volume", + "mountPath": "/etc/config/" + configMapName, + }) + container["volumeMounts"] = volumeMounts + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutSecretVolume adds a Secret volume to the Rollout. 
+func WithRolloutSecretVolume(secretName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + // Add volume + volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes") + volumes = append(volumes, map[string]interface{}{ + "name": secretName + "-volume", + "secret": map[string]interface{}{ + "secretName": secretName, + }, + }) + _ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes") + + // Add volumeMount + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts") + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": secretName + "-volume", + "mountPath": "/etc/secrets/" + secretName, + }) + container["volumeMounts"] = volumeMounts + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutAnnotations adds annotations to the Rollout's pod template. +func WithRolloutAnnotations(annotations map[string]string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + annotationsMap := make(map[string]interface{}) + for k, v := range annotations { + annotationsMap[k] = v + } + _ = unstructured.SetNestedMap(rollout.Object, annotationsMap, "spec", "template", "metadata", "annotations") + } +} + +// WithRolloutObjectAnnotations adds annotations to the Rollout's top-level metadata. +// Use this for annotations that are read from the Rollout object itself (like rollout-strategy). 
+func WithRolloutObjectAnnotations(annotations map[string]string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + annotationsMap := make(map[string]interface{}) + for k, v := range annotations { + annotationsMap[k] = v + } + _ = unstructured.SetNestedMap(rollout.Object, annotationsMap, "metadata", "annotations") + } +} + +// WaitForRolloutReady waits for an Argo Rollout to be ready. +func WaitForRolloutReady(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep polling + } + + // Check status.phase == "Healthy" or replicas == availableReplicas + status, found, _ := unstructured.NestedMap(rollout.Object, "status") + if !found { + return false, nil + } + + phase, _, _ := unstructured.NestedString(status, "phase") + if phase == "Healthy" { + return true, nil + } + + // Alternative: check replicas + replicas, _, _ := unstructured.NestedInt64(rollout.Object, "spec", "replicas") + availableReplicas, _, _ := unstructured.NestedInt64(status, "availableReplicas") + if replicas > 0 && replicas == availableReplicas { + return true, nil + } + + return false, nil + }) +} + +// WaitForRolloutReloaded waits for an Argo Rollout's pod template to have the reloader annotation. 
+func WaitForRolloutReloaded(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + // Check pod template annotations + annotations, _, _ := unstructured.NestedStringMap(rollout.Object, "spec", "template", "metadata", "annotations") + if annotations != nil { + if _, ok := annotations[annotationKey]; ok { + found = true + return true, nil + } + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} + +// GetRolloutPodTemplateAnnotations retrieves the pod template annotations from an Argo Rollout. +func GetRolloutPodTemplateAnnotations(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (map[string]string, error) { + rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + annotations, _, _ := unstructured.NestedStringMap(rollout.Object, "spec", "template", "metadata", "annotations") + return annotations, nil +} + +// WaitForRolloutRestartAt waits for an Argo Rollout's spec.restartAt field to be set. +// This is used when the restart strategy is specified. 
+func WaitForRolloutRestartAt(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + // Check if spec.restartAt is set + restartAt, exists, _ := unstructured.NestedString(rollout.Object, "spec", "restartAt") + if exists && restartAt != "" { + found = true + return true, nil + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} diff --git a/test/e2e/utils/helm.go b/test/e2e/utils/helm.go new file mode 100644 index 0000000..3e826eb --- /dev/null +++ b/test/e2e/utils/helm.go @@ -0,0 +1,224 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +// Helm-related constants. +const ( + // DefaultTestImage is the default image to test if RELOADER_IMAGE is not set. + DefaultTestImage = "ghcr.io/stakater/reloader:test" + + // DefaultHelmReleaseName is the Helm release name for Reloader. + DefaultHelmReleaseName = "reloader" + + // DefaultHelmChartPath is the path to the Helm chart relative to project root. + DefaultHelmChartPath = "deployments/kubernetes/chart/reloader" + + // StakaterEnvVarPrefix is the prefix for Stakater environment variables. + StakaterEnvVarPrefix = "STAKATER_" +) + +// DeployOptions configures how Reloader is deployed. +type DeployOptions struct { + // Namespace to deploy Reloader into. + Namespace string + + // Image is the full image reference (e.g., "ghcr.io/stakater/reloader:test"). + Image string + + // Values are additional Helm values to set (key=value pairs). + Values map[string]string + + // ReleaseName is the Helm release name. Defaults to DefaultHelmReleaseName. 
+ ReleaseName string + + // Timeout for Helm operations. Defaults to "120s". + Timeout string +} + +// DeployReloader deploys Reloader using Helm with the specified options. +func DeployReloader(opts DeployOptions) error { + projectDir, err := GetProjectDir() + if err != nil { + return fmt.Errorf("getting project dir: %w", err) + } + + if opts.ReleaseName == "" { + opts.ReleaseName = DefaultHelmReleaseName + } + if opts.Timeout == "" { + opts.Timeout = "120s" + } + if opts.Image == "" { + opts.Image = GetTestImage() + } + + // Clean up any existing cluster-scoped resources before deploying + // This prevents "already exists" errors when a previous test didn't clean up properly + cleanupClusterResources(opts.ReleaseName) + + chartPath := filepath.Join(projectDir, DefaultHelmChartPath) + + args := []string{ + "upgrade", "--install", opts.ReleaseName, + chartPath, + "--namespace", opts.Namespace, + "--create-namespace", + "--reset-values", // Important: reset values to ensure clean state between tests + "--set", fmt.Sprintf("image.repository=%s", GetImageRepository(opts.Image)), + "--set", fmt.Sprintf("image.tag=%s", GetImageTag(opts.Image)), + "--set", "image.pullPolicy=IfNotPresent", + "--wait", + "--timeout", opts.Timeout, + } + + // Add custom values + for key, value := range opts.Values { + args = append(args, "--set", fmt.Sprintf("%s=%s", key, value)) + } + + cmd := exec.Command("helm", args...) + output, err := Run(cmd) + if err != nil { + return fmt.Errorf("helm install failed: %s: %w", output, err) + } + + return nil +} + +// UndeployReloader removes the Reloader Helm release and cleans up cluster-scoped resources. +// This function waits for all resources to be fully deleted to prevent race conditions +// between test suites. 
func UndeployReloader(namespace, releaseName string) error {
	if releaseName == "" {
		releaseName = DefaultHelmReleaseName
	}

	// Use --wait to ensure Helm waits for resources to be deleted.
	cmd := exec.Command("helm", "uninstall", releaseName, "--namespace", namespace, "--ignore-not-found", "--wait")
	output, err := Run(cmd)
	if err != nil {
		return fmt.Errorf("helm uninstall failed: %s: %w", output, err)
	}

	// Clean up cluster-scoped resources that Helm might not delete.
	// The names mirror the chart's "<release>-reloader-role[-binding]"
	// convention (see cleanupClusterResources).
	// Use --wait to ensure resources are fully deleted before returning.
	clusterResources := []struct {
		kind string
		name string
	}{
		{"clusterrole", releaseName + "-reloader-role"},
		{"clusterrolebinding", releaseName + "-reloader-role-binding"},
	}

	for _, res := range clusterResources {
		cmd := exec.Command("kubectl", "delete", res.kind, res.name, "--ignore-not-found", "--wait=true")
		_, _ = Run(cmd) // Ignore errors - resource may not exist
	}

	// Additional wait to ensure controller is fully stopped and resources are cleaned up.
	// This prevents race conditions when the next test tries to deploy immediately.
	waitForReloaderGone(namespace, releaseName)

	return nil
}

// waitForReloaderGone waits for the Reloader deployment to be fully removed.
// Best-effort: it gives up silently after the poll budget so teardown never
// blocks a suite indefinitely.
func waitForReloaderGone(namespace, releaseName string) {
	deploymentName := ReloaderDeploymentName(releaseName)

	// Poll until deployment is gone (max 30 seconds). An empty "-o name"
	// result means kubectl found no matching deployment.
	for i := 0; i < 30; i++ {
		cmd := exec.Command("kubectl", "get", "deployment", deploymentName, "-n", namespace, "--ignore-not-found", "-o", "name")
		output, _ := Run(cmd)
		if strings.TrimSpace(output) == "" {
			return
		}
		time.Sleep(1 * time.Second)
	}
}

// cleanupClusterResources removes cluster-scoped resources that might be left over
// from a previous test run. This is called before deploying to ensure clean state.
+func cleanupClusterResources(releaseName string) { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + + clusterResources := []struct { + kind string + name string + }{ + {"clusterrole", releaseName + "-reloader-role"}, + {"clusterrolebinding", releaseName + "-reloader-role-binding"}, + } + + for _, res := range clusterResources { + cmd := exec.Command("kubectl", "delete", res.kind, res.name, "--ignore-not-found", "--wait=true") + _, _ = Run(cmd) + } + + // Small wait to ensure API server has processed the deletions + time.Sleep(500 * time.Millisecond) +} + +// GetTestImage returns the test image from environment or the default. +func GetTestImage() string { + if img := os.Getenv("RELOADER_IMAGE"); img != "" { + return img + } + return DefaultTestImage +} + +// GetImageRepository extracts the repository (without tag) from a full image reference. +// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "ghcr.io/stakater/reloader" +func GetImageRepository(image string) string { + for i := len(image) - 1; i >= 0; i-- { + if image[i] == ':' { + return image[:i] + } + if image[i] == '/' { + // No tag found, return as-is + break + } + } + return image +} + +// GetImageTag extracts the tag from a full image reference. +// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "v1.0.0" +// Returns "latest" if no tag is found. +func GetImageTag(image string) string { + for i := len(image) - 1; i >= 0; i-- { + if image[i] == ':' { + return image[i+1:] + } + if image[i] == '/' { + // No tag found + break + } + } + return "latest" +} + +// ReloaderDeploymentName returns the full deployment name for Reloader. +func ReloaderDeploymentName(releaseName string) string { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + return releaseName + "-reloader" +} + +// ReloaderPodSelector returns the label selector for Reloader pods. 
+func ReloaderPodSelector(releaseName string) string { + if releaseName == "" { + releaseName = DefaultHelmReleaseName + } + return "app=" + releaseName + "-reloader" +} diff --git a/test/e2e/utils/helm_test.go b/test/e2e/utils/helm_test.go new file mode 100644 index 0000000..010172e --- /dev/null +++ b/test/e2e/utils/helm_test.go @@ -0,0 +1,157 @@ +package utils + +import ( + "testing" +) + +func TestGetImageRepository(t *testing.T) { + tests := []struct { + name string + image string + expected string + }{ + { + name: "full image with tag", + image: "ghcr.io/stakater/reloader:v1.0.0", + expected: "ghcr.io/stakater/reloader", + }, + { + name: "image with latest tag", + image: "nginx:latest", + expected: "nginx", + }, + { + name: "image without tag", + image: "ghcr.io/stakater/reloader", + expected: "ghcr.io/stakater/reloader", + }, + { + name: "image with digest (not fully supported)", + image: "nginx@sha256:abc123", + expected: "nginx@sha256", // Note: digest handling is limited + }, + { + name: "simple image name", + image: "nginx", + expected: "nginx", + }, + { + name: "image with port in registry", + image: "localhost:5000/myimage:v1", + expected: "localhost:5000/myimage", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetImageRepository(tt.image) + if result != tt.expected { + t.Errorf("GetImageRepository(%q) = %q, want %q", tt.image, result, tt.expected) + } + }) + } +} + +func TestGetImageTag(t *testing.T) { + tests := []struct { + name string + image string + expected string + }{ + { + name: "full image with tag", + image: "ghcr.io/stakater/reloader:v1.0.0", + expected: "v1.0.0", + }, + { + name: "image with latest tag", + image: "nginx:latest", + expected: "latest", + }, + { + name: "image without tag", + image: "ghcr.io/stakater/reloader", + expected: "latest", + }, + { + name: "simple image name", + image: "nginx", + expected: "latest", + }, + { + name: "image with port in registry", + image: 
"localhost:5000/myimage:v1", + expected: "v1", + }, + { + name: "tag with sha", + image: "myimage:sha-abc123", + expected: "sha-abc123", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetImageTag(tt.image) + if result != tt.expected { + t.Errorf("GetImageTag(%q) = %q, want %q", tt.image, result, tt.expected) + } + }) + } +} + +func TestReloaderDeploymentName(t *testing.T) { + tests := []struct { + name string + releaseName string + expected string + }{ + { + name: "default release name", + releaseName: "", + expected: "reloader-reloader", + }, + { + name: "custom release name", + releaseName: "my-reloader", + expected: "my-reloader-reloader", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ReloaderDeploymentName(tt.releaseName) + if result != tt.expected { + t.Errorf("ReloaderDeploymentName(%q) = %q, want %q", tt.releaseName, result, tt.expected) + } + }) + } +} + +func TestReloaderPodSelector(t *testing.T) { + tests := []struct { + name string + releaseName string + expected string + }{ + { + name: "default release name", + releaseName: "", + expected: "app=reloader-reloader", + }, + { + name: "custom release name", + releaseName: "my-reloader", + expected: "app=my-reloader-reloader", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ReloaderPodSelector(tt.releaseName) + if result != tt.expected { + t.Errorf("ReloaderPodSelector(%q) = %q, want %q", tt.releaseName, result, tt.expected) + } + }) + } +} diff --git a/test/e2e/utils/kind.go b/test/e2e/utils/kind.go new file mode 100644 index 0000000..1da9956 --- /dev/null +++ b/test/e2e/utils/kind.go @@ -0,0 +1,27 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" +) + +// GetKindClusterName returns the Kind cluster name from the KIND_CLUSTER environment variable, +// or "kind" as the default. 
+func GetKindClusterName() string { + if cluster := os.Getenv("KIND_CLUSTER"); cluster != "" { + return cluster + } + return "kind" +} + +// LoadImageToKindCluster loads a Docker image into the Kind cluster using the default cluster name. +func LoadImageToKindCluster(image string) error { + cmd := exec.Command("kind", "load", "docker-image", image, "--name", GetKindClusterName()) + output, err := Run(cmd) + if err != nil { + return fmt.Errorf("failed to load image %s to Kind cluster: %w\nOutput: %s", + image, err, output) + } + return nil +} diff --git a/test/e2e/utils/openshift.go b/test/e2e/utils/openshift.go new file mode 100644 index 0000000..dac55f4 --- /dev/null +++ b/test/e2e/utils/openshift.go @@ -0,0 +1,265 @@ +package utils + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" +) + +// DeploymentConfigGVR returns the GroupVersionResource for OpenShift DeploymentConfigs. +var DeploymentConfigGVR = schema.GroupVersionResource{ + Group: "apps.openshift.io", + Version: "v1", + Resource: "deploymentconfigs", +} + +// DCOption is a functional option for configuring a DeploymentConfig. +type DCOption func(*unstructured.Unstructured) + +// HasDeploymentConfigSupport checks if the cluster has OpenShift DeploymentConfig API available. +func HasDeploymentConfigSupport(discoveryClient discovery.DiscoveryInterface) bool { + _, apiLists, err := discoveryClient.ServerGroupsAndResources() + if err != nil { + return false + } + + for _, apiList := range apiLists { + for _, resource := range apiList.APIResources { + if resource.Kind == "DeploymentConfig" { + return true + } + } + } + + return false +} + +// CreateDeploymentConfig creates an OpenShift DeploymentConfig with the given options. 
func CreateDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, opts ...DCOption) error {
	// Base object: a single-replica DeploymentConfig running a sleeping
	// busybox container, selected/labelled by app=<name>, with a
	// ConfigChange trigger so template edits roll out a new deployment.
	dc := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "apps.openshift.io/v1",
			"kind":       "DeploymentConfig",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": namespace,
			},
			"spec": map[string]interface{}{
				// int64 because unstructured helpers expect int64 for numbers.
				"replicas": int64(1),
				"selector": map[string]interface{}{
					"app": name,
				},
				"template": map[string]interface{}{
					"metadata": map[string]interface{}{
						"labels": map[string]interface{}{
							"app": name,
						},
					},
					"spec": map[string]interface{}{
						"containers": []interface{}{
							map[string]interface{}{
								"name":    "app",
								"image":   "busybox:1.36",
								"command": []interface{}{"sh", "-c", "sleep 3600"},
							},
						},
					},
				},
				"triggers": []interface{}{
					map[string]interface{}{
						"type": "ConfigChange",
					},
				},
			},
		},
	}

	// Apply options (mutate the base object in place).
	for _, opt := range opts {
		opt(dc)
	}

	_, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Create(ctx, dc, metav1.CreateOptions{})
	return err
}

// DeleteDeploymentConfig deletes a DeploymentConfig.
func DeleteDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) error {
	return dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}

// GetDeploymentConfig retrieves a DeploymentConfig.
func GetDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (*unstructured.Unstructured, error) {
	return dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
}

// WithDCConfigMapEnvFrom adds a ConfigMap envFrom to the DeploymentConfig.
+func WithDCConfigMapEnvFrom(configMapName string) DCOption { + return func(dc *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + envFrom, _, _ := unstructured.NestedSlice(container, "envFrom") + envFrom = append(envFrom, map[string]interface{}{ + "configMapRef": map[string]interface{}{ + "name": configMapName, + }, + }) + container["envFrom"] = envFrom + containers[0] = container + _ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithDCSecretEnvFrom adds a Secret envFrom to the DeploymentConfig. +func WithDCSecretEnvFrom(secretName string) DCOption { + return func(dc *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + envFrom, _, _ := unstructured.NestedSlice(container, "envFrom") + envFrom = append(envFrom, map[string]interface{}{ + "secretRef": map[string]interface{}{ + "name": secretName, + }, + }) + container["envFrom"] = envFrom + containers[0] = container + _ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithDCConfigMapVolume adds a ConfigMap volume to the DeploymentConfig. 
+func WithDCConfigMapVolume(configMapName string) DCOption { + return func(dc *unstructured.Unstructured) { + // Add volume + volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes") + volumes = append(volumes, map[string]interface{}{ + "name": configMapName + "-volume", + "configMap": map[string]interface{}{ + "name": configMapName, + }, + }) + _ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes") + + // Add volumeMount + containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts") + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": configMapName + "-volume", + "mountPath": "/etc/config/" + configMapName, + }) + container["volumeMounts"] = volumeMounts + containers[0] = container + _ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithDCSecretVolume adds a Secret volume to the DeploymentConfig. 
+func WithDCSecretVolume(secretName string) DCOption { + return func(dc *unstructured.Unstructured) { + // Add volume + volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes") + volumes = append(volumes, map[string]interface{}{ + "name": secretName + "-volume", + "secret": map[string]interface{}{ + "secretName": secretName, + }, + }) + _ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes") + + // Add volumeMount + containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts") + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": secretName + "-volume", + "mountPath": "/etc/secrets/" + secretName, + }) + container["volumeMounts"] = volumeMounts + containers[0] = container + _ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithDCAnnotations adds annotations to the DeploymentConfig's pod template. +func WithDCAnnotations(annotations map[string]string) DCOption { + return func(dc *unstructured.Unstructured) { + annotationsMap := make(map[string]interface{}) + for k, v := range annotations { + annotationsMap[k] = v + } + _ = unstructured.SetNestedMap(dc.Object, annotationsMap, "spec", "template", "metadata", "annotations") + } +} + +// WaitForDeploymentConfigReady waits for a DeploymentConfig to be ready. 
func WaitForDeploymentConfigReady(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // Keep polling; the DC may not exist yet.
		}

		// Check replicas == readyReplicas; replicas > 0 guards the 0 == 0 case.
		replicas, _, _ := unstructured.NestedInt64(dc.Object, "spec", "replicas")
		readyReplicas, _, _ := unstructured.NestedInt64(dc.Object, "status", "readyReplicas")

		if replicas > 0 && replicas == readyReplicas {
			return true, nil
		}

		return false, nil
	})
}

// WaitForDeploymentConfigReloaded waits for a DeploymentConfig's pod template to have the reloader annotation.
// Returns (false, nil) when the timeout elapses without the annotation appearing;
// any non-timeout poll error is returned as-is.
func WaitForDeploymentConfigReloaded(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // Keep polling on transient Get errors.
		}

		// Check pod template annotations for the reloader marker key.
		annotations, _, _ := unstructured.NestedStringMap(dc.Object, "spec", "template", "metadata", "annotations")
		if annotations != nil {
			if _, ok := annotations[annotationKey]; ok {
				found = true
				return true, nil
			}
		}

		return false, nil
	})

	// NOTE(review): if the poll helper ever wraps the context error, this
	// comparison should become errors.Is(err, context.DeadlineExceeded).
	if err != nil && err != context.DeadlineExceeded {
		return false, err
	}
	return found, nil
}

// GetDeploymentConfigPodTemplateAnnotations retrieves the pod template annotations from a DeploymentConfig.
+func GetDeploymentConfigPodTemplateAnnotations(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (map[string]string, error) { + dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + annotations, _, _ := unstructured.NestedStringMap(dc.Object, "spec", "template", "metadata", "annotations") + return annotations, nil +} diff --git a/test/e2e/utils/rand.go b/test/e2e/utils/rand.go new file mode 100644 index 0000000..601b14a --- /dev/null +++ b/test/e2e/utils/rand.go @@ -0,0 +1,26 @@ +package utils + +import ( + "math/rand" + "time" +) + +const letters = "abcdefghijklmnopqrstuvwxyz" + +var randSource = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec + +// RandSeq generates a random lowercase string of length n. +// This is useful for creating unique resource names in tests. +func RandSeq(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letters[randSource.Intn(len(letters))] + } + return string(b) +} + +// RandName generates a unique name with the given prefix. +// Format: prefix-xxxxx where x is a random lowercase letter. 
+func RandName(prefix string) string { + return prefix + "-" + RandSeq(5) +} diff --git a/test/e2e/utils/rand_test.go b/test/e2e/utils/rand_test.go new file mode 100644 index 0000000..2a8ad3f --- /dev/null +++ b/test/e2e/utils/rand_test.go @@ -0,0 +1,135 @@ +package utils + +import ( + "regexp" + "testing" +) + +func TestRandSeq(t *testing.T) { + tests := []struct { + name string + length int + }{ + {"length 0", 0}, + {"length 1", 1}, + {"length 5", 5}, + {"length 10", 10}, + {"length 100", 100}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := RandSeq(tt.length) + + // Verify length + if len(result) != tt.length { + t.Errorf("RandSeq(%d) returned string of length %d, want %d", + tt.length, len(result), tt.length) + } + + // Verify only lowercase letters + if tt.length > 0 { + matched, _ := regexp.MatchString("^[a-z]+$", result) + if !matched { + t.Errorf("RandSeq(%d) = %q, contains non-lowercase letters", tt.length, result) + } + } + }) + } +} + +func TestRandSeqRandomness(t *testing.T) { + // Generate multiple sequences and verify they're different + // (with very high probability) + const iterations = 10 + const length = 20 + + seen := make(map[string]bool) + for i := 0; i < iterations; i++ { + s := RandSeq(length) + if seen[s] { + // This is extremely unlikely with 20 chars (26^20 possibilities) + t.Errorf("RandSeq generated duplicate: %q", s) + } + seen[s] = true + } + + // Verify we got 10 unique strings + if len(seen) != iterations { + t.Errorf("Expected %d unique strings, got %d", iterations, len(seen)) + } +} + +func TestRandName(t *testing.T) { + tests := []struct { + name string + prefix string + }{ + {"deploy prefix", "deploy"}, + {"cm prefix", "cm"}, + {"secret prefix", "secret"}, + {"test-app prefix", "test-app"}, + {"empty prefix", ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := RandName(tt.prefix) + + // Verify format: prefix-xxxxx + expectedPrefix := tt.prefix + "-" + if 
len(result) <= len(expectedPrefix) { + t.Errorf("RandName(%q) = %q, too short", tt.prefix, result) + return + } + + // Check prefix + if result[:len(expectedPrefix)] != expectedPrefix { + t.Errorf("RandName(%q) = %q, doesn't start with %q", + tt.prefix, result, expectedPrefix) + } + + // Check random suffix is 5 lowercase letters + suffix := result[len(expectedPrefix):] + if len(suffix) != 5 { + t.Errorf("RandName(%q) suffix length = %d, want 5", tt.prefix, len(suffix)) + } + + matched, _ := regexp.MatchString("^[a-z]{5}$", suffix) + if !matched { + t.Errorf("RandName(%q) suffix = %q, should be 5 lowercase letters", + tt.prefix, suffix) + } + }) + } +} + +func TestRandNameUniqueness(t *testing.T) { + // Generate multiple names with same prefix and verify uniqueness + const prefix = "test" + const iterations = 100 + + seen := make(map[string]bool) + for i := 0; i < iterations; i++ { + name := RandName(prefix) + if seen[name] { + t.Errorf("RandName generated duplicate: %q", name) + } + seen[name] = true + } +} + +func TestRandNameKubernetesCompatibility(t *testing.T) { + // Verify generated names are valid Kubernetes resource names + // Must match: [a-z0-9]([-a-z0-9]*[a-z0-9])? 
+ + prefixes := []string{"deploy", "cm", "secret", "test-app", "my-resource"} + k8sNamePattern := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) + + for _, prefix := range prefixes { + name := RandName(prefix) + if !k8sNamePattern.MatchString(name) { + t.Errorf("RandName(%q) = %q is not a valid Kubernetes name", prefix, name) + } + } +} diff --git a/test/e2e/utils/resources.go b/test/e2e/utils/resources.go new file mode 100644 index 0000000..e4dc83d --- /dev/null +++ b/test/e2e/utils/resources.go @@ -0,0 +1,1094 @@ +package utils + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" +) + +const ( + // DefaultImage is the default container image used for test workloads. + DefaultImage = "busybox:1.36" + // DefaultCommand is the default command for test containers. + DefaultCommand = "sleep 3600" +) + +// CreateNamespace creates a namespace with the given name. +func CreateNamespace(ctx context.Context, client kubernetes.Interface, name string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + return err +} + +// CreateNamespaceWithLabels creates a namespace with the given name and labels. +func CreateNamespaceWithLabels(ctx context.Context, client kubernetes.Interface, name string, labels map[string]string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } + _, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + return err +} + +// DeleteNamespace deletes the namespace with the given name. 
+func DeleteNamespace(ctx context.Context, client kubernetes.Interface, name string) error { + return client.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{}) +} + +// CreateConfigMap creates a ConfigMap with the given name, data, and optional annotations. +func CreateConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, annotations map[string]string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) +} + +// CreateConfigMapWithLabels creates a ConfigMap with the given name, data, labels, and optional annotations. +func CreateConfigMapWithLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, labels, annotations map[string]string) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}) +} + +// CreateSecret creates a Secret with the given name, data, and optional annotations. +func CreateSecret(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string][]byte, annotations map[string]string) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Data: data, + } + return client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) +} + +// UpdateConfigMap updates a ConfigMap's data. 
+func UpdateConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string) error { + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + cm.Data = data + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// UpdateConfigMapLabels updates a ConfigMap's labels. +func UpdateConfigMapLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, labels map[string]string) error { + cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if cm.Labels == nil { + cm.Labels = make(map[string]string) + } + for k, v := range labels { + cm.Labels[k] = v + } + _, err = client.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + return err +} + +// UpdateSecret updates a Secret's data. +func UpdateSecret(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string][]byte) error { + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + secret.Data = data + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// UpdateSecretLabels updates a Secret's labels. +func UpdateSecretLabels(ctx context.Context, client kubernetes.Interface, namespace, name string, labels map[string]string) error { + secret, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + if secret.Labels == nil { + secret.Labels = make(map[string]string) + } + for k, v := range labels { + secret.Labels[k] = v + } + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + return err +} + +// stringToByteMap converts a string map to a byte map for Secret data. 
+func stringToByteMap(data map[string]string) map[string][]byte { + result := make(map[string][]byte) + for k, v := range data { + result[k] = []byte(v) + } + return result +} + +// CreateSecretFromStrings creates a Secret with string data (convenience wrapper). +func CreateSecretFromStrings(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string, annotations map[string]string) (*corev1.Secret, error) { + return CreateSecret(ctx, client, namespace, name, stringToByteMap(data), annotations) +} + +// UpdateSecretFromStrings updates a Secret's data using string values. +func UpdateSecretFromStrings(ctx context.Context, client kubernetes.Interface, namespace, name string, data map[string]string) error { + return UpdateSecret(ctx, client, namespace, name, stringToByteMap(data)) +} + +// DeleteConfigMap deletes a ConfigMap. +func DeleteConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().ConfigMaps(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DeleteSecret deletes a Secret. +func DeleteSecret(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.CoreV1().Secrets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DeploymentOption is a functional option for configuring a Deployment. +type DeploymentOption func(*appsv1.Deployment) + +// CreateDeployment creates a Deployment with the given options. +func CreateDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...DeploymentOption) (*appsv1.Deployment, error) { + deploy := baseDeployment(namespace, name) + for _, opt := range opts { + opt(deploy) + } + return client.AppsV1().Deployments(namespace).Create(ctx, deploy, metav1.CreateOptions{}) +} + +// WithAnnotations adds annotations to the Deployment metadata. 
+func WithAnnotations(annotations map[string]string) DeploymentOption { + return func(d *appsv1.Deployment) { + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + for k, v := range annotations { + d.Annotations[k] = v + } + } +} + +// WithConfigMapEnvFrom adds an envFrom reference to a ConfigMap. +func WithConfigMapEnvFrom(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithSecretEnvFrom adds an envFrom reference to a Secret. +func WithSecretEnvFrom(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].EnvFrom = append( + d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithConfigMapVolume adds a volume mount for a ConfigMap. +func WithConfigMapVolume(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := fmt.Sprintf("cm-%s", name) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/config/%s", name), + }, + ) + } +} + +// WithSecretVolume adds a volume mount for a Secret. 
+func WithSecretVolume(name string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := fmt.Sprintf("secret-%s", name) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/secrets/%s", name), + }, + ) + } +} + +// WithProjectedVolume adds a projected volume with ConfigMap and/or Secret sources. +func WithProjectedVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := "projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append( + d.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/projected", + }, + ) + } +} + +// WithInitContainer adds an init container that references ConfigMap and/or Secret. 
+func WithInitContainer(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithMultipleContainers adds additional containers to the pod. +func WithMultipleContainers(count int) DeploymentOption { + return func(d *appsv1.Deployment) { + for i := 1; i < count; i++ { + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, corev1.Container{ + Name: fmt.Sprintf("container-%d", i), + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }) + } + } +} + +// WithMultipleContainersAndEnv creates two containers, each with a different ConfigMap envFrom. 
+func WithMultipleContainersAndEnv(cm1Name, cm2Name string) DeploymentOption { + return func(d *appsv1.Deployment) { + // First container gets the first ConfigMap + d.Spec.Template.Spec.Containers[0].EnvFrom = append(d.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cm1Name}, + }, + }) + // Add second container with second ConfigMap + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, corev1.Container{ + Name: "container-1", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cm2Name}, + }, + }, + }, + }) + } +} + +// WithReplicas sets the number of replicas. +func WithReplicas(replicas int32) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.To(replicas) + } +} + +// baseDeployment creates a base Deployment template. +func baseDeployment(namespace, name string) *appsv1.Deployment { + labels := map[string]string{"app": name} + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteDeployment deletes a Deployment. +func DeleteDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().Deployments(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// DaemonSetOption is a functional option for configuring a DaemonSet. 
+type DaemonSetOption func(*appsv1.DaemonSet) + +// CreateDaemonSet creates a DaemonSet with the given options. +func CreateDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...DaemonSetOption) (*appsv1.DaemonSet, error) { + ds := baseDaemonSet(namespace, name) + for _, opt := range opts { + opt(ds) + } + return client.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{}) +} + +// WithDaemonSetAnnotations adds annotations to the DaemonSet metadata. +func WithDaemonSetAnnotations(annotations map[string]string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + if ds.Annotations == nil { + ds.Annotations = make(map[string]string) + } + for k, v := range annotations { + ds.Annotations[k] = v + } + } +} + +// WithDaemonSetConfigMapEnvFrom adds an envFrom reference to a ConfigMap. +func WithDaemonSetConfigMapEnvFrom(name string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + ds.Spec.Template.Spec.Containers[0].EnvFrom = append( + ds.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithDaemonSetSecretEnvFrom adds an envFrom reference to a Secret. +func WithDaemonSetSecretEnvFrom(name string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + ds.Spec.Template.Spec.Containers[0].EnvFrom = append( + ds.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// baseDaemonSet creates a base DaemonSet template. 
+func baseDaemonSet(namespace, name string) *appsv1.DaemonSet { + labels := map[string]string{"app": name} + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteDaemonSet deletes a DaemonSet. +func DeleteDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().DaemonSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// StatefulSetOption is a functional option for configuring a StatefulSet. +type StatefulSetOption func(*appsv1.StatefulSet) + +// CreateStatefulSet creates a StatefulSet with the given options. +func CreateStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...StatefulSetOption) (*appsv1.StatefulSet, error) { + ss := baseStatefulSet(namespace, name) + for _, opt := range opts { + opt(ss) + } + return client.AppsV1().StatefulSets(namespace).Create(ctx, ss, metav1.CreateOptions{}) +} + +// WithStatefulSetAnnotations adds annotations to the StatefulSet metadata. +func WithStatefulSetAnnotations(annotations map[string]string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + if ss.Annotations == nil { + ss.Annotations = make(map[string]string) + } + for k, v := range annotations { + ss.Annotations[k] = v + } + } +} + +// WithStatefulSetConfigMapEnvFrom adds an envFrom reference to a ConfigMap. 
+func WithStatefulSetConfigMapEnvFrom(name string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].EnvFrom = append( + ss.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithStatefulSetSecretEnvFrom adds an envFrom reference to a Secret. +func WithStatefulSetSecretEnvFrom(name string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].EnvFrom = append( + ss.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// baseStatefulSet creates a base StatefulSet template. +func baseStatefulSet(namespace, name string) *appsv1.StatefulSet { + labels := map[string]string{"app": name} + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: name, + Replicas: ptr.To(int32(1)), + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "app", + Image: DefaultImage, + Command: []string{"sh", "-c", DefaultCommand}, + }, + }, + }, + }, + }, + } +} + +// DeleteStatefulSet deletes a StatefulSet. +func DeleteStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.AppsV1().StatefulSets(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// CronJobOption is a functional option for configuring a CronJob. +type CronJobOption func(*batchv1.CronJob) + +// CreateCronJob creates a CronJob with the given options. 
+func CreateCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...CronJobOption) (*batchv1.CronJob, error) { + cj := baseCronJob(namespace, name) + for _, opt := range opts { + opt(cj) + } + return client.BatchV1().CronJobs(namespace).Create(ctx, cj, metav1.CreateOptions{}) +} + +// WithCronJobAnnotations adds annotations to the CronJob metadata. +func WithCronJobAnnotations(annotations map[string]string) CronJobOption { + return func(cj *batchv1.CronJob) { + if cj.Annotations == nil { + cj.Annotations = make(map[string]string) + } + for k, v := range annotations { + cj.Annotations[k] = v + } + } +} + +// WithCronJobConfigMapEnvFrom adds an envFrom reference to a ConfigMap. +func WithCronJobConfigMapEnvFrom(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithCronJobSecretEnvFrom adds an envFrom reference to a Secret. +func WithCronJobSecretEnvFrom(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// baseCronJob creates a base CronJob template. 
+func baseCronJob(namespace, name string) *batchv1.CronJob { + labels := map[string]string{"app": name} + return &batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "* * * * *", // Every minute + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + Containers: []corev1.Container{ + { + Name: "job", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo done"}, + }, + }, + }, + }, + }, + }, + }, + } +} + +// DeleteCronJob deletes a CronJob. +func DeleteCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + return client.BatchV1().CronJobs(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// JobOption is a functional option for configuring a Job. +type JobOption func(*batchv1.Job) + +// CreateJob creates a Job with the given options. +func CreateJob(ctx context.Context, client kubernetes.Interface, namespace, name string, opts ...JobOption) (*batchv1.Job, error) { + job := baseJob(namespace, name) + for _, opt := range opts { + opt(job) + } + return client.BatchV1().Jobs(namespace).Create(ctx, job, metav1.CreateOptions{}) +} + +// WithJobAnnotations adds annotations to the Job metadata. +func WithJobAnnotations(annotations map[string]string) JobOption { + return func(j *batchv1.Job) { + if j.Annotations == nil { + j.Annotations = make(map[string]string) + } + for k, v := range annotations { + j.Annotations[k] = v + } + } +} + +// WithJobConfigMapEnvFrom adds an envFrom reference to a ConfigMap. 
+func WithJobConfigMapEnvFrom(name string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].EnvFrom = append( + j.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// WithJobSecretEnvFrom adds an envFrom reference to a Secret. +func WithJobSecretEnvFrom(name string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].EnvFrom = append( + j.Spec.Template.Spec.Containers[0].EnvFrom, + corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + ) + } +} + +// baseJob creates a base Job template. +func baseJob(namespace, name string) *batchv1.Job { + labels := map[string]string{"app": name} + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "job", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo done"}, + }, + }, + }, + }, + }, + } +} + +// DeleteJob deletes a Job. +func DeleteJob(ctx context.Context, client kubernetes.Interface, namespace, name string) error { + propagation := metav1.DeletePropagationBackground + return client.BatchV1().Jobs(namespace).Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &propagation, + }) +} + +// WithConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to the container. 
+func WithConfigMapKeyRef(cmName, key, envVarName string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].Env = append( + d.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithSecretKeyRef adds a valueFrom.secretKeyRef env var to the container. +func WithSecretKeyRef(secretName, key, envVarName string) DeploymentOption { + return func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].Env = append( + d.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithPodTemplateAnnotations adds annotations to the pod template metadata (not deployment metadata). +func WithPodTemplateAnnotations(annotations map[string]string) DeploymentOption { + return func(d *appsv1.Deployment) { + if d.Spec.Template.Annotations == nil { + d.Spec.Template.Annotations = make(map[string]string) + } + for k, v := range annotations { + d.Spec.Template.Annotations[k] = v + } + } +} + +// WithInitContainerVolume adds an init container with ConfigMap/Secret volume mounts. 
+func WithInitContainerVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }, + }) + initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }) + initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithInitContainerProjectedVolume adds an init container with projected volume. 
+func WithInitContainerProjectedVolume(cmName, secretName string) DeploymentOption { + return func(d *appsv1.Deployment) { + volumeName := "init-projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: "/etc/init-projected", + }, + }, + } + + d.Spec.Template.Spec.InitContainers = append(d.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithDaemonSetProjectedVolume adds a projected volume with ConfigMap and/or Secret sources to a DaemonSet. 
+func WithDaemonSetProjectedVolume(cmName, secretName string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + volumeName := "projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + ds.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ds.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/projected", + }, + ) + } +} + +// WithStatefulSetProjectedVolume adds a projected volume with ConfigMap and/or Secret sources to a StatefulSet. 
+func WithStatefulSetProjectedVolume(cmName, secretName string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + volumeName := "projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }) + ss.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ss.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/projected", + }, + ) + } +} + +// WithDaemonSetConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to a DaemonSet. +func WithDaemonSetConfigMapKeyRef(cmName, key, envVarName string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + ds.Spec.Template.Spec.Containers[0].Env = append( + ds.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithDaemonSetSecretKeyRef adds a valueFrom.secretKeyRef env var to a DaemonSet. 
+func WithDaemonSetSecretKeyRef(secretName, key, envVarName string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + ds.Spec.Template.Spec.Containers[0].Env = append( + ds.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithStatefulSetConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to a StatefulSet. +func WithStatefulSetConfigMapKeyRef(cmName, key, envVarName string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].Env = append( + ss.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithStatefulSetSecretKeyRef adds a valueFrom.secretKeyRef env var to a StatefulSet. +func WithStatefulSetSecretKeyRef(secretName, key, envVarName string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].Env = append( + ss.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithJobConfigMapKeyRef adds a valueFrom.configMapKeyRef env var to a Job. 
+func WithJobConfigMapKeyRef(cmName, key, envVarName string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].Env = append( + j.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + Key: key, + }, + }, + }, + ) + } +} + +// WithJobSecretKeyRef adds a valueFrom.secretKeyRef env var to a Job. +func WithJobSecretKeyRef(secretName, key, envVarName string) JobOption { + return func(j *batchv1.Job) { + j.Spec.Template.Spec.Containers[0].Env = append( + j.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: envVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: key, + }, + }, + }, + ) + } +} diff --git a/test/e2e/utils/test_helpers.go b/test/e2e/utils/test_helpers.go new file mode 100644 index 0000000..f075b70 --- /dev/null +++ b/test/e2e/utils/test_helpers.go @@ -0,0 +1,12 @@ +package utils + +// MergeAnnotations merges multiple annotation maps into one. 
+func MergeAnnotations(maps ...map[string]string) map[string]string { + result := make(map[string]string) + for _, m := range maps { + for k, v := range m { + result[k] = v + } + } + return result +} diff --git a/test/e2e/utils/test_helpers_test.go b/test/e2e/utils/test_helpers_test.go new file mode 100644 index 0000000..33c5751 --- /dev/null +++ b/test/e2e/utils/test_helpers_test.go @@ -0,0 +1,148 @@ +package utils + +import ( + "testing" +) + +func TestMergeAnnotations(t *testing.T) { + tests := []struct { + name string + maps []map[string]string + expected map[string]string + }{ + { + name: "no maps", + maps: []map[string]string{}, + expected: map[string]string{}, + }, + { + name: "single map", + maps: []map[string]string{ + {"key1": "value1"}, + }, + expected: map[string]string{ + "key1": "value1", + }, + }, + { + name: "two maps no overlap", + maps: []map[string]string{ + {"key1": "value1"}, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "three maps with overlap - last wins", + maps: []map[string]string{ + {"key1": "value1", "shared": "first"}, + {"key2": "value2", "shared": "second"}, + {"key3": "value3", "shared": "third"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + "shared": "third", // Last map wins + }, + }, + { + name: "empty map in the middle", + maps: []map[string]string{ + {"key1": "value1"}, + {}, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "nil map in the middle", + maps: []map[string]string{ + {"key1": "value1"}, + nil, + {"key2": "value2"}, + }, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "realistic use case - auto annotation with reload annotation", + maps: []map[string]string{ + BuildAutoTrueAnnotation(), + BuildConfigMapReloadAnnotation("my-config"), + }, + expected: map[string]string{ + 
AnnotationAuto: AnnotationValueTrue, + AnnotationConfigMapReload: "my-config", + }, + }, + { + name: "realistic use case - pause period with reload annotation", + maps: []map[string]string{ + BuildConfigMapReloadAnnotation("config1"), + BuildPausePeriodAnnotation("10s"), + }, + expected: map[string]string{ + AnnotationConfigMapReload: "config1", + AnnotationDeploymentPausePeriod: "10s", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MergeAnnotations(tt.maps...) + + if len(result) != len(tt.expected) { + t.Errorf("MergeAnnotations() returned %d entries, want %d", len(result), len(tt.expected)) + t.Errorf("Got: %v", result) + t.Errorf("Want: %v", tt.expected) + return + } + + for k, v := range tt.expected { + if result[k] != v { + t.Errorf("MergeAnnotations()[%q] = %q, want %q", k, result[k], v) + } + } + }) + } +} + +func TestMergeAnnotationsDoesNotModifyInput(t *testing.T) { + // Ensure MergeAnnotations doesn't modify the input maps + map1 := map[string]string{"key1": "value1"} + map2 := map[string]string{"key2": "value2"} + + _ = MergeAnnotations(map1, map2) + + // Verify original maps are unchanged + if len(map1) != 1 || map1["key1"] != "value1" { + t.Errorf("map1 was modified: %v", map1) + } + if len(map2) != 1 || map2["key2"] != "value2" { + t.Errorf("map2 was modified: %v", map2) + } +} + +func TestMergeAnnotationsReturnsNewMap(t *testing.T) { + // Ensure MergeAnnotations returns a new map, not a reference to an input + input := map[string]string{"key1": "value1"} + result := MergeAnnotations(input) + + // Modify the result + result["key2"] = "value2" + + // Verify original is unchanged + if _, exists := input["key2"]; exists { + t.Error("modifying result affected input map - should return a new map") + } +} diff --git a/test/e2e/utils/testenv.go b/test/e2e/utils/testenv.go new file mode 100644 index 0000000..f405073 --- /dev/null +++ b/test/e2e/utils/testenv.go @@ -0,0 +1,154 @@ +package utils + +import ( + 
+	"context"
+	"fmt"
+
+	. "github.com/onsi/ginkgo/v2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// TestEnvironment holds the common test environment state.
+// Namespace and ReleaseName are randomized per suite so multiple suites can
+// share one cluster without colliding on namespaced or cluster-scoped objects.
+type TestEnvironment struct {
+	Ctx             context.Context
+	Cancel          context.CancelFunc
+	KubeClient      kubernetes.Interface
+	DynamicClient   dynamic.Interface
+	DiscoveryClient discovery.DiscoveryInterface
+	Namespace       string
+	ReleaseName     string // Unique Helm release name to prevent cluster-scoped resource conflicts
+	TestImage       string
+	ProjectDir      string
+}
+
+// SetupTestEnvironment creates a new test environment with kubernetes clients.
+// It creates a unique namespace with the given prefix.
+func SetupTestEnvironment(ctx context.Context, namespacePrefix string) (*TestEnvironment, error) {
+	env := &TestEnvironment{
+		Ctx:       ctx,
+		TestImage: GetTestImage(),
+	}
+
+	var err error
+
+	// Get project directory
+	env.ProjectDir, err = GetProjectDir()
+	if err != nil {
+		return nil, fmt.Errorf("getting project directory: %w", err)
+	}
+
+	// Setup Kubernetes client
+	kubeconfig := GetKubeconfig()
+	GinkgoWriter.Printf("Using kubeconfig: %s\n", kubeconfig)
+
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, fmt.Errorf("building config from kubeconfig: %w", err)
+	}
+
+	env.KubeClient, err = kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("creating kubernetes client: %w", err)
+	}
+
+	env.DynamicClient, err = dynamic.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("creating dynamic client: %w", err)
+	}
+
+	env.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("creating discovery client: %w", err)
+	}
+
+	// Verify cluster connectivity with a cheap single-item list before doing
+	// any mutations, so a misconfigured kubeconfig fails fast and clearly.
+	GinkgoWriter.Println("Verifying cluster connectivity...")
+	_, err = env.KubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{Limit: 1})
+	if err != nil {
+		return nil, fmt.Errorf("connecting to kubernetes cluster: %w", err)
+	}
+	GinkgoWriter.Println("Cluster connectivity verified")
+
+	// Create test namespace with random suffix
+	env.Namespace = RandName(namespacePrefix)
+	// Use a unique release name to prevent cluster-scoped resource conflicts between test suites
+	env.ReleaseName = RandName("reloader")
+	GinkgoWriter.Printf("Creating test namespace: %s\n", env.Namespace)
+	GinkgoWriter.Printf("Using Helm release name: %s\n", env.ReleaseName)
+	if err := CreateNamespace(ctx, env.KubeClient, env.Namespace); err != nil {
+		return nil, fmt.Errorf("creating test namespace: %w", err)
+	}
+
+	GinkgoWriter.Printf("Using test image: %s\n", env.TestImage)
+	GinkgoWriter.Printf("Project directory: %s\n", env.ProjectDir)
+
+	return env, nil
+}
+
+// Cleanup cleans up the test environment resources.
+// Log collection and Helm undeploy are best-effort (errors ignored); only a
+// failure to delete the namespace itself is reported to the caller.
+func (e *TestEnvironment) Cleanup() error {
+	if e.Namespace == "" {
+		// Setup never got far enough to create anything.
+		return nil
+	}
+
+	GinkgoWriter.Printf("Cleaning up test namespace: %s\n", e.Namespace)
+	GinkgoWriter.Printf("Cleaning up Helm release: %s\n", e.ReleaseName)
+
+	// Collect Reloader logs before cleanup (useful for debugging)
+	logs, err := GetPodLogs(e.Ctx, e.KubeClient, e.Namespace, ReloaderPodSelector(e.ReleaseName))
+	if err == nil && logs != "" {
+		GinkgoWriter.Println("Reloader logs:")
+		GinkgoWriter.Println(logs)
+	}
+
+	// Undeploy Reloader using the suite's release name
+	_ = UndeployReloader(e.Namespace, e.ReleaseName)
+
+	// Delete test namespace
+	if err := DeleteNamespace(e.Ctx, e.KubeClient, e.Namespace); err != nil {
+		return fmt.Errorf("deleting namespace: %w", err)
+	}
+
+	return nil
+}
+
+// DeployReloaderWithStrategy deploys Reloader with the specified reload strategy.
+func (e *TestEnvironment) DeployReloaderWithStrategy(strategy string) error {
+	// Thin wrapper: only the reloadStrategy Helm value differs per strategy.
+	return e.DeployReloaderWithValues(map[string]string{
+		"reloader.reloadStrategy": strategy,
+	})
+}
+
+// DeployReloaderWithValues deploys Reloader with the specified Helm values.
+// Each test suite uses a unique release name to prevent cluster-scoped resource conflicts.
+func (e *TestEnvironment) DeployReloaderWithValues(values map[string]string) error {
+	GinkgoWriter.Printf("Deploying Reloader with values: %v\n", values)
+	return DeployReloader(DeployOptions{
+		Namespace:   e.Namespace,
+		ReleaseName: e.ReleaseName,
+		Image:       e.TestImage,
+		Values:      values,
+	})
+}
+
+// WaitForReloader waits for the Reloader deployment to be ready.
+func (e *TestEnvironment) WaitForReloader() error {
+	GinkgoWriter.Println("Waiting for Reloader to be ready...")
+	return WaitForDeploymentReady(e.Ctx, e.KubeClient, e.Namespace, ReloaderDeploymentName(e.ReleaseName), DeploymentReady)
+}
+
+// DeployAndWait deploys Reloader with the given values and waits for it to be ready.
+func (e *TestEnvironment) DeployAndWait(values map[string]string) error {
+	if err := e.DeployReloaderWithValues(values); err != nil {
+		return fmt.Errorf("deploying Reloader: %w", err)
+	}
+	if err := e.WaitForReloader(); err != nil {
+		return fmt.Errorf("waiting for Reloader: %w", err)
+	}
+	GinkgoWriter.Println("Reloader is ready")
+	return nil
+}
diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go
new file mode 100644
index 0000000..3cf0035
--- /dev/null
+++ b/test/e2e/utils/utils.go
@@ -0,0 +1,114 @@
+// Package utils provides helper functions for e2e tests.
+package utils
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	. "github.com/onsi/ginkgo/v2" //nolint:revive,staticcheck
+)
+
+// Run executes the provided command and returns its combined stdout/stderr output.
+// The command is executed from the project directory.
+func Run(cmd *exec.Cmd) (string, error) { + dir, err := GetProjectDir() + if err != nil { + return "", fmt.Errorf("failed to get project dir: %w", err) + } + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", err) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + output := stdout.String() + stderr.String() + if err != nil { + return output, fmt.Errorf("%q failed with error %q: %w", command, output, err) + } + + return output, nil +} + +// GetProjectDir returns the root directory of the project. +// It works by finding the directory containing go.mod. +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get current working directory: %w", err) + } + + // Walk up the directory tree looking for go.mod + dir := wd + for { + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + return dir, nil + } + + parent := filepath.Dir(dir) + if parent == dir { + // Reached root without finding go.mod + break + } + dir = parent + } + + // Fallback: try to strip common test paths + wd = strings.ReplaceAll(wd, "/test/e2e", "") + wd = strings.ReplaceAll(wd, "/test/e2e/annotations", "") + wd = strings.ReplaceAll(wd, "/test/e2e/envvars", "") + wd = strings.ReplaceAll(wd, "/test/e2e/flags", "") + wd = strings.ReplaceAll(wd, "/test/e2e/advanced", "") + wd = strings.ReplaceAll(wd, "/test/e2e/argo", "") + wd = strings.ReplaceAll(wd, "/test/e2e/openshift", "") + + return wd, nil +} + +// GetNonEmptyLines splits the given output string into individual lines, +// filtering out empty lines. 
+func GetNonEmptyLines(output string) []string {
+	var result []string
+	lines := strings.Split(output, "\n")
+	for _, line := range lines {
+		trimmed := strings.TrimSpace(line)
+		if trimmed != "" {
+			result = append(result, trimmed)
+		}
+	}
+	return result
+}
+
+// GetEnvOrDefault returns the value of the environment variable named by key,
+// or defaultValue if the variable is not present or empty.
+func GetEnvOrDefault(key, defaultValue string) string {
+	if value := os.Getenv(key); value != "" {
+		return value
+	}
+	return defaultValue
+}
+
+// GetKubeconfig returns the path to the kubeconfig file.
+// It checks KUBECONFIG environment variable first, then falls back to ~/.kube/config.
+func GetKubeconfig() string {
+	if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
+		return kubeconfig
+	}
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return ""
+	}
+	return filepath.Join(home, ".kube", "config")
+}
diff --git a/test/e2e/utils/wait.go b/test/e2e/utils/wait.go
new file mode 100644
index 0000000..7d77b56
--- /dev/null
+++ b/test/e2e/utils/wait.go
@@ -0,0 +1,498 @@
+package utils
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+)
+
+// Timeout and interval constants for polling operations.
+const (
+	DefaultTimeout   = 30 * time.Second // General operations
+	DefaultInterval  = 1 * time.Second  // Polling interval (faster feedback)
+	ShortTimeout     = 5 * time.Second  // Quick checks
+	NegativeTestWait = 3 * time.Second  // Wait before checking negative conditions
+	DeploymentReady  = 60 * time.Second // Workload readiness (buffer for CI)
+	ReloadTimeout    = 15 * time.Second // Time for reload to trigger
+)
+
+// WaitForDeploymentReady waits for a deployment to have all replicas available.
+func WaitForDeploymentReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep polling + } + + // Check if deployment is ready + if deploy.Status.ReadyReplicas == *deploy.Spec.Replicas && + deploy.Status.UpdatedReplicas == *deploy.Spec.Replicas && + deploy.Status.AvailableReplicas == *deploy.Spec.Replicas { + return true, nil + } + + return false, nil + }) +} + +// WaitForDeploymentReloaded waits for a deployment's pod template to have the reloader annotation. +// Returns true if the annotation was found, false if timeout occurred. +func WaitForDeploymentReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep polling + } + + // Check pod template annotations + if deploy.Spec.Template.Annotations != nil { + if _, ok := deploy.Spec.Template.Annotations[annotationKey]; ok { + found = true + return true, nil + } + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} + +// WaitForDaemonSetReloaded waits for a DaemonSet's pod template to have the reloader annotation. 
+func WaitForDaemonSetReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil // Keep polling on transient API errors
+		}
+
+		if ds.Spec.Template.Annotations != nil {
+			if _, ok := ds.Spec.Template.Annotations[annotationKey]; ok {
+				found = true
+				return true, nil
+			}
+		}
+
+		return false, nil
+	})
+
+	// Timeout means "annotation never appeared" (found=false), not a failure.
+	// errors.Is is used instead of == so wrapped deadline errors match too.
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForStatefulSetReloaded waits for a StatefulSet's pod template to have the reloader annotation.
+func WaitForStatefulSetReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		if ss.Spec.Template.Annotations != nil {
+			if _, ok := ss.Spec.Template.Annotations[annotationKey]; ok {
+				found = true
+				return true, nil
+			}
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForCronJobReloaded waits for a CronJob's pod template to have the reloader annotation.
+func WaitForCronJobReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		cj, err := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		// CronJobs nest the pod template one level deeper, under the job template.
+		if cj.Spec.JobTemplate.Spec.Template.Annotations != nil {
+			if _, ok := cj.Spec.JobTemplate.Spec.Template.Annotations[annotationKey]; ok {
+				found = true
+				return true, nil
+			}
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForJobCreated waits for a Job to be created with the given label selector.
+func WaitForJobCreated(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{
+			LabelSelector: labelSelector,
+		})
+		if err != nil {
+			return false, nil
+		}
+
+		if len(jobs.Items) > 0 {
+			found = true
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForCronJobTriggeredJob waits for a Job to be created by the specified CronJob.
+// It checks owner references to find Jobs created by Reloader's manual trigger.
+func WaitForCronJobTriggeredJob(ctx context.Context, client kubernetes.Interface, namespace, cronJobName string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		for _, job := range jobs.Items {
+			// Check if this job is owned by the CronJob
+			for _, ownerRef := range job.OwnerReferences {
+				if ownerRef.Kind == "CronJob" && ownerRef.Name == cronJobName {
+					// Check for the manual instantiate annotation (added by Reloader)
+					if job.Annotations != nil {
+						if _, ok := job.Annotations["cronjob.kubernetes.io/instantiate"]; ok {
+							found = true
+							return true, nil
+						}
+					}
+				}
+			}
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForDeploymentEnvVar waits for a deployment's containers to have an environment variable
+// with the given prefix (e.g., "STAKATER_").
+func WaitForDeploymentEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		if hasEnvVarWithPrefix(deploy.Spec.Template.Spec.Containers, prefix) {
+			found = true
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForDaemonSetEnvVar waits for a DaemonSet's containers to have an environment variable
+// with the given prefix.
+func WaitForDaemonSetEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		if hasEnvVarWithPrefix(ds.Spec.Template.Spec.Containers, prefix) {
+			found = true
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForStatefulSetEnvVar waits for a StatefulSet's containers to have an environment variable
+// with the given prefix.
+func WaitForStatefulSetEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		if hasEnvVarWithPrefix(ss.Spec.Template.Spec.Containers, prefix) {
+			found = true
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForDeploymentPaused waits for a deployment to have the paused-at annotation.
+func WaitForDeploymentPaused(ctx context.Context, client kubernetes.Interface, namespace, name, pausedAtAnnotation string, timeout time.Duration) (bool, error) {
+	var found bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		// Check deployment annotations (not pod template)
+		if deploy.Annotations != nil {
+			if _, ok := deploy.Annotations[pausedAtAnnotation]; ok {
+				found = true
+				return true, nil
+			}
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return found, nil
+}
+
+// WaitForDeploymentUnpaused waits for a deployment to NOT have the paused-at annotation.
+func WaitForDeploymentUnpaused(ctx context.Context, client kubernetes.Interface, namespace, name, pausedAtAnnotation string, timeout time.Duration) (bool, error) {
+	var unpaused bool
+	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		// Check if paused-at annotation is gone
+		if deploy.Annotations == nil {
+			unpaused = true
+			return true, nil
+		}
+		if _, ok := deploy.Annotations[pausedAtAnnotation]; !ok {
+			unpaused = true
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
+		return false, err
+	}
+	return unpaused, nil
+}
+
+// WaitForDaemonSetReady waits for a DaemonSet to have all pods ready.
+func WaitForDaemonSetReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
+	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil // Keep polling on transient API errors
+		}
+
+		// Require at least one scheduled pod so a freshly-created DaemonSet
+		// (0 desired, 0 ready) is not mistaken for ready.
+		if ds.Status.DesiredNumberScheduled > 0 &&
+			ds.Status.NumberReady == ds.Status.DesiredNumberScheduled {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
+
+// WaitForStatefulSetReady waits for a StatefulSet to have all replicas ready.
+func WaitForStatefulSetReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
+	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil
+		}
+
+		// Spec.Replicas is *int32 and may be nil (defaults to 1); guard the
+		// dereference instead of panicking mid-poll.
+		replicas := int32(1)
+		if ss.Spec.Replicas != nil {
+			replicas = *ss.Spec.Replicas
+		}
+
+		if ss.Status.ReadyReplicas == replicas {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
+
+// GetDeployment retrieves a deployment by name.
+func GetDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.Deployment, error) {
+	return client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
+}
+
+// GetDaemonSet retrieves a DaemonSet by name.
+func GetDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.DaemonSet, error) {
+	return client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
+}
+
+// GetStatefulSet retrieves a StatefulSet by name.
+func GetStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.StatefulSet, error) {
+	return client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
+}
+
+// GetCronJob retrieves a CronJob by name.
+func GetCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string) (*batchv1.CronJob, error) {
+	return client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
+}
+
+// WaitForCronJobExists waits for a CronJob to exist in the cluster.
+// This is useful for giving Reloader time to detect and index the CronJob before making changes.
+func WaitForCronJobExists(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
+	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
+		// Any error (including NotFound) just means "not there yet".
+		_, err := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			return false, nil // Keep polling
+		}
+		return true, nil
+	})
+}
+
+// GetJob retrieves a Job by name.
+func GetJob(ctx context.Context, client kubernetes.Interface, namespace, name string) (*batchv1.Job, error) {
+	return client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
+}
+
+// hasEnvVarWithPrefix checks if any container has an environment variable with the given prefix.
+// Only direct env entries are inspected; envFrom sources are not expanded.
+func hasEnvVarWithPrefix(containers []corev1.Container, prefix string) bool {
+	for _, container := range containers {
+		for _, env := range container.Env {
+			if strings.HasPrefix(env.Name, prefix) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// WaitForJobRecreated waits for a Job to be deleted and recreated with a new UID.
+// Returns the new Job's UID if recreation was detected.
+func WaitForJobRecreated(ctx context.Context, client kubernetes.Interface, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) { + var newUID string + var recreated bool + + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + job, err := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + // Job not found means it's been deleted, keep polling for recreation + return false, nil + } + + // Check if the UID has changed (indicating recreation) + if string(job.UID) != originalUID { + newUID = string(job.UID) + recreated = true + return true, nil + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return "", false, err + } + return newUID, recreated, nil +} + +// WaitForJobNotFound waits for a Job to be deleted. +func WaitForJobNotFound(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) { + var deleted bool + + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + _, err := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + deleted = true + return true, nil + } + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return deleted, nil +} + +// WaitForJobExists waits for a Job to exist in the cluster. +func WaitForJobExists(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + _, err := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil // Keep polling + } + return true, nil + }) +} + +// GetPodLogs retrieves logs from pods matching the given label selector. 
+func GetPodLogs(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string) (string, error) {
+	pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to list pods: %w", err)
+	}
+
+	var allLogs strings.Builder
+	for _, pod := range pods.Items {
+		for _, container := range pod.Spec.Containers {
+			logs, err := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
+				Container: container.Name,
+			}).Do(ctx).Raw()
+			if err != nil {
+				// Best-effort: record the failure inline and keep collecting
+				// logs from the remaining containers instead of aborting.
+				allLogs.WriteString(fmt.Sprintf("Error getting logs for %s/%s: %v\n", pod.Name, container.Name, err))
+				continue
+			}
+			allLogs.WriteString(fmt.Sprintf("=== %s/%s ===\n%s\n", pod.Name, container.Name, string(logs)))
+		}
+	}
+
+	return allLogs.String(), nil
+}
diff --git a/test/e2e/utils/workload_adapter.go b/test/e2e/utils/workload_adapter.go
new file mode 100644
index 0000000..f8374d8
--- /dev/null
+++ b/test/e2e/utils/workload_adapter.go
@@ -0,0 +1,160 @@
+package utils
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+)
+
+// WorkloadType represents the type of Kubernetes workload.
+type WorkloadType string
+
+const (
+	WorkloadDeployment       WorkloadType = "Deployment"
+	WorkloadDaemonSet        WorkloadType = "DaemonSet"
+	WorkloadStatefulSet      WorkloadType = "StatefulSet"
+	WorkloadCronJob          WorkloadType = "CronJob"
+	WorkloadJob              WorkloadType = "Job"
+	WorkloadArgoRollout      WorkloadType = "ArgoRollout"
+	WorkloadDeploymentConfig WorkloadType = "DeploymentConfig"
+)
+
+// ReloadStrategy represents the reload strategy used by Reloader.
+type ReloadStrategy string
+
+const (
+	StrategyAnnotations ReloadStrategy = "annotations"
+	StrategyEnvVars     ReloadStrategy = "envvars"
+)
+
+// WorkloadConfig holds configuration for workload creation.
+type WorkloadConfig struct {
+	// Resource references
+	ConfigMapName string
+	SecretName    string
+
+	// Annotations to set on the workload
+	Annotations map[string]string
+
+	// Reference methods (flags - multiple can be true)
+	UseConfigMapEnvFrom    bool
+	UseSecretEnvFrom       bool
+	UseConfigMapVolume     bool
+	UseSecretVolume        bool
+	UseProjectedVolume     bool
+	UseConfigMapKeyRef     bool
+	UseSecretKeyRef        bool
+	UseInitContainer       bool
+	UseInitContainerVolume bool
+
+	// For valueFrom references
+	ConfigMapKey string
+	SecretKey    string
+	EnvVarName   string
+
+	// Special options
+	MultipleContainers int // Number of containers (0 or 1 means single container)
+}
+
+// WorkloadAdapter provides a unified interface for all workload types.
+// This allows tests to be parameterized across different workload types.
+type WorkloadAdapter interface {
+	// Type returns the workload type.
+	Type() WorkloadType
+
+	// Create creates the workload with the given config.
+	Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error
+
+	// Delete removes the workload.
+	Delete(ctx context.Context, namespace, name string) error
+
+	// WaitReady waits for the workload to be ready.
+	WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error
+
+	// WaitReloaded waits for the workload to have the reload annotation.
+	// Returns true if the annotation was found, false if timeout occurred.
+	WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error)
+
+	// WaitEnvVar waits for the workload to have a STAKATER_ env var (for envvars strategy).
+	// Returns true if the env var was found, false if timeout occurred.
+	WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error)
+
+	// SupportsEnvVarStrategy returns true if the workload supports env var reload strategy.
+	// CronJob does not support this as it uses job creation instead.
+	SupportsEnvVarStrategy() bool
+
+	// RequiresSpecialHandling returns true for workloads that need special handling.
+	// For example, CronJob triggers a new job instead of rolling restart.
+	RequiresSpecialHandling() bool
+}
+
+// AdapterRegistry holds adapters for all workload types.
+type AdapterRegistry struct {
+	kubeClient    kubernetes.Interface
+	dynamicClient dynamic.Interface
+	adapters      map[WorkloadType]WorkloadAdapter
+}
+
+// NewAdapterRegistry creates a new adapter registry with all standard adapters.
+func NewAdapterRegistry(kubeClient kubernetes.Interface, dynamicClient dynamic.Interface) *AdapterRegistry {
+	r := &AdapterRegistry{
+		kubeClient:    kubeClient,
+		dynamicClient: dynamicClient,
+		adapters:      make(map[WorkloadType]WorkloadAdapter),
+	}
+
+	// Register standard adapters
+	r.adapters[WorkloadDeployment] = NewDeploymentAdapter(kubeClient)
+	r.adapters[WorkloadDaemonSet] = NewDaemonSetAdapter(kubeClient)
+	r.adapters[WorkloadStatefulSet] = NewStatefulSetAdapter(kubeClient)
+	r.adapters[WorkloadCronJob] = NewCronJobAdapter(kubeClient)
+	r.adapters[WorkloadJob] = NewJobAdapter(kubeClient)
+
+	// Argo and OpenShift adapters are registered separately via RegisterAdapter
+	// as they require specific cluster support
+
+	return r
+}
+
+// RegisterAdapter registers a custom adapter for a workload type.
+// Use this to add Argo Rollout or DeploymentConfig adapters.
+func (r *AdapterRegistry) RegisterAdapter(adapter WorkloadAdapter) {
+	// Keyed by the adapter's own Type(); re-registering replaces any existing entry.
+	r.adapters[adapter.Type()] = adapter
+}
+
+// Get returns the adapter for the given workload type.
+// Returns nil if the adapter is not registered.
+func (r *AdapterRegistry) Get(wt WorkloadType) WorkloadAdapter {
+	return r.adapters[wt]
+}
+
+// GetStandardWorkloads returns the standard workload types that are always available.
+func (r *AdapterRegistry) GetStandardWorkloads() []WorkloadType {
+	// Fixed list (not derived from the map) so ordering is deterministic.
+	return []WorkloadType{
+		WorkloadDeployment,
+		WorkloadDaemonSet,
+		WorkloadStatefulSet,
+	}
+}
+
+// GetAllWorkloads returns all registered workload types.
+// Note: order is nondeterministic because it ranges over a map.
+func (r *AdapterRegistry) GetAllWorkloads() []WorkloadType {
+	result := make([]WorkloadType, 0, len(r.adapters))
+	for wt := range r.adapters {
+		result = append(result, wt)
+	}
+	return result
+}
+
+// GetEnvVarWorkloads returns workload types that support env var reload strategy.
+// Note: order is nondeterministic because it ranges over a map.
+func (r *AdapterRegistry) GetEnvVarWorkloads() []WorkloadType {
+	result := make([]WorkloadType, 0)
+	for wt, adapter := range r.adapters {
+		if adapter.SupportsEnvVarStrategy() {
+			result = append(result, wt)
+		}
+	}
+	return result
+}
diff --git a/test/e2e/utils/workload_argo.go b/test/e2e/utils/workload_argo.go
new file mode 100644
index 0000000..b2f37f7
--- /dev/null
+++ b/test/e2e/utils/workload_argo.go
@@ -0,0 +1,340 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/dynamic"
+)
+
+// ArgoRolloutAdapter implements WorkloadAdapter for Argo Rollouts.
+// Rollouts are a CRD, so all access goes through the dynamic client.
+type ArgoRolloutAdapter struct {
+	dynamicClient dynamic.Interface
+}
+
+// NewArgoRolloutAdapter creates a new ArgoRolloutAdapter.
+func NewArgoRolloutAdapter(dynamicClient dynamic.Interface) *ArgoRolloutAdapter {
+	return &ArgoRolloutAdapter{dynamicClient: dynamicClient}
+}
+
+// Type returns the workload type.
+func (a *ArgoRolloutAdapter) Type() WorkloadType {
+	return WorkloadArgoRollout
+}
+
+// Create creates an Argo Rollout with the given config.
+func (a *ArgoRolloutAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
+	opts := buildRolloutOptions(cfg)
+	return CreateArgoRollout(ctx, a.dynamicClient, namespace, name, opts...)
+}
+
+// Delete removes the Argo Rollout.
+func (a *ArgoRolloutAdapter) Delete(ctx context.Context, namespace, name string) error {
+	return DeleteArgoRollout(ctx, a.dynamicClient, namespace, name)
+}
+
+// WaitReady waits for the Argo Rollout to be ready.
+func (a *ArgoRolloutAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
+	return WaitForRolloutReady(ctx, a.dynamicClient, namespace, name, timeout)
+}
+
+// WaitReloaded waits for the Argo Rollout to have the reload annotation.
+func (a *ArgoRolloutAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
+	return WaitForRolloutReloaded(ctx, a.dynamicClient, namespace, name, annotationKey, timeout)
+}
+
+// WaitEnvVar waits for the Argo Rollout to have a STAKATER_ env var.
+func (a *ArgoRolloutAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
+	return WaitForRolloutEnvVar(ctx, a.dynamicClient, namespace, name, prefix, timeout)
+}
+
+// SupportsEnvVarStrategy returns true as Argo Rollouts support env var reload strategy.
+func (a *ArgoRolloutAdapter) SupportsEnvVarStrategy() bool {
+	return true
+}
+
+// RequiresSpecialHandling returns false as Argo Rollouts use standard rolling restart.
+func (a *ArgoRolloutAdapter) RequiresSpecialHandling() bool {
+	return false
+}
+
+// buildRolloutOptions converts WorkloadConfig to RolloutOption slice.
+func buildRolloutOptions(cfg WorkloadConfig) []RolloutOption { + var opts []RolloutOption + + // Add annotations (to pod template) + if len(cfg.Annotations) > 0 { + opts = append(opts, WithRolloutAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithRolloutConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithRolloutSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithRolloutConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithRolloutSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithRolloutProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add valueFrom references + if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" { + key := cfg.ConfigMapKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "CONFIG_VAR" + } + opts = append(opts, WithRolloutConfigMapKeyRef(cfg.ConfigMapName, key, envVar)) + } + if cfg.UseSecretKeyRef && cfg.SecretName != "" { + key := cfg.SecretKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "SECRET_VAR" + } + opts = append(opts, WithRolloutSecretKeyRef(cfg.SecretName, key, envVar)) + } + + // Add init container with envFrom + if cfg.UseInitContainer { + opts = append(opts, WithRolloutInitContainer(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add init container with volume mount + if cfg.UseInitContainerVolume { + opts = append(opts, WithRolloutInitContainerVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + return opts +} + +// WithRolloutProjectedVolume adds a projected volume with ConfigMap and/or Secret sources to a Rollout. 
+func WithRolloutProjectedVolume(cmName, secretName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + volumeName := "projected-config" + sources := []interface{}{} + + if cmName != "" { + sources = append(sources, map[string]interface{}{ + "configMap": map[string]interface{}{ + "name": cmName, + }, + }) + } + if secretName != "" { + sources = append(sources, map[string]interface{}{ + "secret": map[string]interface{}{ + "name": secretName, + }, + }) + } + + // Add volume + volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes") + volumes = append(volumes, map[string]interface{}{ + "name": volumeName, + "projected": map[string]interface{}{ + "sources": sources, + }, + }) + _ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes") + + // Add volumeMount + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts") + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": volumeName, + "mountPath": "/etc/projected", + }) + container["volumeMounts"] = volumeMounts + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutConfigMapKeyRef adds an env var with valueFrom.configMapKeyRef to a Rollout. 
+func WithRolloutConfigMapKeyRef(cmName, key, envVarName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + env, _, _ := unstructured.NestedSlice(container, "env") + env = append(env, map[string]interface{}{ + "name": envVarName, + "valueFrom": map[string]interface{}{ + "configMapKeyRef": map[string]interface{}{ + "name": cmName, + "key": key, + }, + }, + }) + container["env"] = env + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutSecretKeyRef adds an env var with valueFrom.secretKeyRef to a Rollout. +func WithRolloutSecretKeyRef(secretName, key, envVarName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + if len(containers) > 0 { + container := containers[0].(map[string]interface{}) + env, _, _ := unstructured.NestedSlice(container, "env") + env = append(env, map[string]interface{}{ + "name": envVarName, + "valueFrom": map[string]interface{}{ + "secretKeyRef": map[string]interface{}{ + "name": secretName, + "key": key, + }, + }, + }) + container["env"] = env + containers[0] = container + _ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers") + } + } +} + +// WithRolloutInitContainer adds an init container that references ConfigMap and/or Secret. 
+func WithRolloutInitContainer(cmName, secretName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + initContainer := map[string]interface{}{ + "name": "init", + "image": DefaultImage, + "command": []interface{}{"sh", "-c", "echo init done"}, + } + + envFrom := []interface{}{} + if cmName != "" { + envFrom = append(envFrom, map[string]interface{}{ + "configMapRef": map[string]interface{}{ + "name": cmName, + }, + }) + } + if secretName != "" { + envFrom = append(envFrom, map[string]interface{}{ + "secretRef": map[string]interface{}{ + "name": secretName, + }, + }) + } + if len(envFrom) > 0 { + initContainer["envFrom"] = envFrom + } + + initContainers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "initContainers") + initContainers = append(initContainers, initContainer) + _ = unstructured.SetNestedSlice(rollout.Object, initContainers, "spec", "template", "spec", "initContainers") + } +} + +// WithRolloutInitContainerVolume adds an init container with ConfigMap/Secret volume mounts. 
+func WithRolloutInitContainerVolume(cmName, secretName string) RolloutOption { + return func(rollout *unstructured.Unstructured) { + initContainer := map[string]interface{}{ + "name": "init", + "image": DefaultImage, + "command": []interface{}{"sh", "-c", "echo init done"}, + } + + volumeMounts := []interface{}{} + volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes") + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + volumes = append(volumes, map[string]interface{}{ + "name": volumeName, + "configMap": map[string]interface{}{ + "name": cmName, + }, + }) + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": volumeName, + "mountPath": fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + volumes = append(volumes, map[string]interface{}{ + "name": volumeName, + "secret": map[string]interface{}{ + "secretName": secretName, + }, + }) + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": volumeName, + "mountPath": fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + if len(volumeMounts) > 0 { + initContainer["volumeMounts"] = volumeMounts + } + + _ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes") + + initContainers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "initContainers") + initContainers = append(initContainers, initContainer) + _ = unstructured.SetNestedSlice(rollout.Object, initContainers, "spec", "template", "spec", "initContainers") + } +} + +// WaitForRolloutEnvVar waits for an Argo Rollout's container to have an env var with the given prefix. 
+func WaitForRolloutEnvVar(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers") + for _, c := range containers { + container := c.(map[string]interface{}) + env, _, _ := unstructured.NestedSlice(container, "env") + for _, e := range env { + envVar := e.(map[string]interface{}) + if name, ok := envVar["name"].(string); ok && strings.HasPrefix(name, prefix) { + found = true + return true, nil + } + } + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} diff --git a/test/e2e/utils/workload_cronjob.go b/test/e2e/utils/workload_cronjob.go new file mode 100644 index 0000000..00d85e5 --- /dev/null +++ b/test/e2e/utils/workload_cronjob.go @@ -0,0 +1,223 @@ +package utils + +import ( + "context" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CronJobAdapter implements WorkloadAdapter for Kubernetes CronJobs. +type CronJobAdapter struct { + client kubernetes.Interface +} + +// NewCronJobAdapter creates a new CronJobAdapter. +func NewCronJobAdapter(client kubernetes.Interface) *CronJobAdapter { + return &CronJobAdapter{client: client} +} + +// Type returns the workload type. +func (a *CronJobAdapter) Type() WorkloadType { + return WorkloadCronJob +} + +// Create creates a CronJob with the given config. 
+func (a *CronJobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildCronJobOptions(cfg) + _, err := CreateCronJob(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the CronJob. +func (a *CronJobAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteCronJob(ctx, a.client, namespace, name) +} + +// WaitReady waits for the CronJob to exist (CronJobs are "ready" immediately after creation). +func (a *CronJobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + return WaitForCronJobExists(ctx, a.client, namespace, name, timeout) +} + +// WaitReloaded waits for the CronJob to have the reload annotation OR for a triggered Job. +// For CronJobs, Reloader can either: +// 1. Add an annotation to the pod template +// 2. Trigger a new Job (which is the special handling case) +func (a *CronJobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + return WaitForCronJobReloaded(ctx, a.client, namespace, name, annotationKey, timeout) +} + +// WaitEnvVar is not supported for CronJobs as they don't use env var reload strategy. +func (a *CronJobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + // CronJobs don't support env var strategy + return false, nil +} + +// SupportsEnvVarStrategy returns false as CronJobs don't support env var reload strategy. +func (a *CronJobAdapter) SupportsEnvVarStrategy() bool { + return false +} + +// RequiresSpecialHandling returns true as CronJobs use job triggering instead of rolling restart. +func (a *CronJobAdapter) RequiresSpecialHandling() bool { + return true +} + +// WaitForTriggeredJob waits for Reloader to trigger a new Job from this CronJob. 
+func (a *CronJobAdapter) WaitForTriggeredJob(ctx context.Context, namespace, cronJobName string, timeout time.Duration) (bool, error) { + return WaitForCronJobTriggeredJob(ctx, a.client, namespace, cronJobName, timeout) +} + +// buildCronJobOptions converts WorkloadConfig to CronJobOption slice. +func buildCronJobOptions(cfg WorkloadConfig) []CronJobOption { + var opts []CronJobOption + + // Add annotations + if len(cfg.Annotations) > 0 { + opts = append(opts, WithCronJobAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithCronJobConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithCronJobSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithCronJobConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithCronJobSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithCronJobProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + return opts +} + +// WithCronJobConfigMapVolume adds a volume mount for a ConfigMap to a CronJob. 
+func WithCronJobConfigMapVolume(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + volumeName := "cm-" + name + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }, + ) + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/config/" + name, + }, + ) + } +} + +// WithCronJobSecretVolume adds a volume mount for a Secret to a CronJob. +func WithCronJobSecretVolume(name string) CronJobOption { + return func(cj *batchv1.CronJob) { + volumeName := "secret-" + name + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + }, + ) + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/secrets/" + name, + }, + ) + } +} + +// WithCronJobProjectedVolume adds a projected volume with ConfigMap and/or Secret sources to a CronJob. 
+func WithCronJobProjectedVolume(cmName, secretName string) CronJobOption { + return func(cj *batchv1.CronJob) { + volumeName := "projected-config" + sources := []corev1.VolumeProjection{} + + if cmName != "" { + sources = append(sources, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + sources = append(sources, corev1.VolumeProjection{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: sources, + }, + }, + }, + ) + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append( + cj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/projected", + }, + ) + } +} + +// WaitForCronJobEnvVar waits for a CronJob's containers to have an environment variable +// with the given prefix. Note: CronJobs don't typically use this strategy. 
+func WaitForCronJobEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + cj, err := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + if hasEnvVarWithPrefix(cj.Spec.JobTemplate.Spec.Template.Spec.Containers, prefix) { + found = true + return true, nil + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} diff --git a/test/e2e/utils/workload_daemonset.go b/test/e2e/utils/workload_daemonset.go new file mode 100644 index 0000000..8d4d55b --- /dev/null +++ b/test/e2e/utils/workload_daemonset.go @@ -0,0 +1,246 @@ +package utils + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" +) + +// DaemonSetAdapter implements WorkloadAdapter for Kubernetes DaemonSets. +type DaemonSetAdapter struct { + client kubernetes.Interface +} + +// NewDaemonSetAdapter creates a new DaemonSetAdapter. +func NewDaemonSetAdapter(client kubernetes.Interface) *DaemonSetAdapter { + return &DaemonSetAdapter{client: client} +} + +// Type returns the workload type. +func (a *DaemonSetAdapter) Type() WorkloadType { + return WorkloadDaemonSet +} + +// Create creates a DaemonSet with the given config. +func (a *DaemonSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildDaemonSetOptions(cfg) + _, err := CreateDaemonSet(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the DaemonSet. +func (a *DaemonSetAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteDaemonSet(ctx, a.client, namespace, name) +} + +// WaitReady waits for the DaemonSet to be ready. 
+func (a *DaemonSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + return WaitForDaemonSetReady(ctx, a.client, namespace, name, timeout) +} + +// WaitReloaded waits for the DaemonSet to have the reload annotation. +func (a *DaemonSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + return WaitForDaemonSetReloaded(ctx, a.client, namespace, name, annotationKey, timeout) +} + +// WaitEnvVar waits for the DaemonSet to have a STAKATER_ env var. +func (a *DaemonSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return WaitForDaemonSetEnvVar(ctx, a.client, namespace, name, prefix, timeout) +} + +// SupportsEnvVarStrategy returns true as DaemonSets support env var reload strategy. +func (a *DaemonSetAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as DaemonSets use standard rolling restart. +func (a *DaemonSetAdapter) RequiresSpecialHandling() bool { + return false +} + +// buildDaemonSetOptions converts WorkloadConfig to DaemonSetOption slice. 
+func buildDaemonSetOptions(cfg WorkloadConfig) []DaemonSetOption { + var opts []DaemonSetOption + + // Add annotations + if len(cfg.Annotations) > 0 { + opts = append(opts, WithDaemonSetAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithDaemonSetConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithDaemonSetSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithDaemonSetConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithDaemonSetSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithDaemonSetProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add valueFrom references + if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" { + key := cfg.ConfigMapKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "CONFIG_VAR" + } + opts = append(opts, WithDaemonSetConfigMapKeyRef(cfg.ConfigMapName, key, envVar)) + } + if cfg.UseSecretKeyRef && cfg.SecretName != "" { + key := cfg.SecretKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "SECRET_VAR" + } + opts = append(opts, WithDaemonSetSecretKeyRef(cfg.SecretName, key, envVar)) + } + + // Add init container with envFrom + if cfg.UseInitContainer { + opts = append(opts, WithDaemonSetInitContainer(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add init container with volume mount + if cfg.UseInitContainerVolume { + opts = append(opts, WithDaemonSetInitContainerVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + return opts +} + +// WithDaemonSetConfigMapVolume adds a volume mount for a ConfigMap to a DaemonSet. 
+func WithDaemonSetConfigMapVolume(name string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + volumeName := fmt.Sprintf("cm-%s", name) + ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }) + ds.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ds.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/config/%s", name), + }, + ) + } +} + +// WithDaemonSetSecretVolume adds a volume mount for a Secret to a DaemonSet. +func WithDaemonSetSecretVolume(name string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + volumeName := fmt.Sprintf("secret-%s", name) + ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + }) + ds.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ds.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/secrets/%s", name), + }, + ) + } +} + +// WithDaemonSetInitContainer adds an init container that references ConfigMap and/or Secret. 
+func WithDaemonSetInitContainer(cmName, secretName string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + ds.Spec.Template.Spec.InitContainers = append(ds.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithDaemonSetInitContainerVolume adds an init container with ConfigMap/Secret volume mounts. +func WithDaemonSetInitContainerVolume(cmName, secretName string) DaemonSetOption { + return func(ds *appsv1.DaemonSet) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }, + }) + initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }) + 
initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + ds.Spec.Template.Spec.InitContainers = append(ds.Spec.Template.Spec.InitContainers, initContainer) + } +} diff --git a/test/e2e/utils/workload_deployment.go b/test/e2e/utils/workload_deployment.go new file mode 100644 index 0000000..951ba79 --- /dev/null +++ b/test/e2e/utils/workload_deployment.go @@ -0,0 +1,132 @@ +package utils + +import ( + "context" + "time" + + "k8s.io/client-go/kubernetes" +) + +// DeploymentAdapter implements WorkloadAdapter for Kubernetes Deployments. +type DeploymentAdapter struct { + client kubernetes.Interface +} + +// NewDeploymentAdapter creates a new DeploymentAdapter. +func NewDeploymentAdapter(client kubernetes.Interface) *DeploymentAdapter { + return &DeploymentAdapter{client: client} +} + +// Type returns the workload type. +func (a *DeploymentAdapter) Type() WorkloadType { + return WorkloadDeployment +} + +// Create creates a Deployment with the given config. +func (a *DeploymentAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildDeploymentOptions(cfg) + _, err := CreateDeployment(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the Deployment. +func (a *DeploymentAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteDeployment(ctx, a.client, namespace, name) +} + +// WaitReady waits for the Deployment to be ready. +func (a *DeploymentAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + return WaitForDeploymentReady(ctx, a.client, namespace, name, timeout) +} + +// WaitReloaded waits for the Deployment to have the reload annotation. 
+func (a *DeploymentAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + return WaitForDeploymentReloaded(ctx, a.client, namespace, name, annotationKey, timeout) +} + +// WaitEnvVar waits for the Deployment to have a STAKATER_ env var. +func (a *DeploymentAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return WaitForDeploymentEnvVar(ctx, a.client, namespace, name, prefix, timeout) +} + +// SupportsEnvVarStrategy returns true as Deployments support env var reload strategy. +func (a *DeploymentAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as Deployments use standard rolling restart. +func (a *DeploymentAdapter) RequiresSpecialHandling() bool { + return false +} + +// buildDeploymentOptions converts WorkloadConfig to DeploymentOption slice. +func buildDeploymentOptions(cfg WorkloadConfig) []DeploymentOption { + var opts []DeploymentOption + + // Add annotations + if len(cfg.Annotations) > 0 { + opts = append(opts, WithAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add valueFrom references + if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" { + key := cfg.ConfigMapKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName 
+ if envVar == "" { + envVar = "CONFIG_VAR" + } + opts = append(opts, WithConfigMapKeyRef(cfg.ConfigMapName, key, envVar)) + } + if cfg.UseSecretKeyRef && cfg.SecretName != "" { + key := cfg.SecretKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "SECRET_VAR" + } + opts = append(opts, WithSecretKeyRef(cfg.SecretName, key, envVar)) + } + + // Add init container with envFrom + if cfg.UseInitContainer { + opts = append(opts, WithInitContainer(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add init container with volume mount + if cfg.UseInitContainerVolume { + opts = append(opts, WithInitContainerVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add multiple containers + if cfg.MultipleContainers > 1 { + opts = append(opts, WithMultipleContainers(cfg.MultipleContainers)) + } + + return opts +} diff --git a/test/e2e/utils/workload_job.go b/test/e2e/utils/workload_job.go new file mode 100644 index 0000000..d2a405e --- /dev/null +++ b/test/e2e/utils/workload_job.go @@ -0,0 +1,207 @@ +package utils + +import ( + "context" + "time" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" +) + +// JobAdapter implements WorkloadAdapter for Kubernetes Jobs. +// Note: Jobs are handled specially by Reloader - they are recreated rather than updated. +type JobAdapter struct { + client kubernetes.Interface +} + +// NewJobAdapter creates a new JobAdapter. +func NewJobAdapter(client kubernetes.Interface) *JobAdapter { + return &JobAdapter{client: client} +} + +// Type returns the workload type. +func (a *JobAdapter) Type() WorkloadType { + return WorkloadJob +} + +// Create creates a Job with the given config. +func (a *JobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildJobOptions(cfg) + _, err := CreateJob(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the Job. 
+func (a *JobAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteJob(ctx, a.client, namespace, name) +} + +// WaitReady waits for the Job to exist. +func (a *JobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + return WaitForJobExists(ctx, a.client, namespace, name, timeout) +} + +// WaitReloaded waits for the Job to be recreated (new UID). +// For Jobs, Reloader recreates the Job rather than updating annotations. +func (a *JobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + // For Jobs, we check if it was recreated by looking for a new UID + // This requires storing the original UID before the test + // For simplicity, we use the same pattern as other workloads + // The test should verify recreation using WaitForJobRecreated instead + return false, nil +} + +// WaitEnvVar is not supported for Jobs as they don't use env var reload strategy. +func (a *JobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return false, nil +} + +// SupportsEnvVarStrategy returns false as Jobs don't support env var reload strategy. +func (a *JobAdapter) SupportsEnvVarStrategy() bool { + return false +} + +// RequiresSpecialHandling returns true as Jobs are recreated by Reloader. +func (a *JobAdapter) RequiresSpecialHandling() bool { + return true +} + +// GetOriginalUID retrieves the current UID of the Job for recreation verification. +func (a *JobAdapter) GetOriginalUID(ctx context.Context, namespace, name string) (string, error) { + job, err := GetJob(ctx, a.client, namespace, name) + if err != nil { + return "", err + } + return string(job.UID), nil +} + +// WaitForRecreation waits for the Job to be recreated with a new UID. 
+func (a *JobAdapter) WaitForRecreation(ctx context.Context, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) { + return WaitForJobRecreated(ctx, a.client, namespace, name, originalUID, timeout) +} + +// buildJobOptions converts WorkloadConfig to JobOption slice. +func buildJobOptions(cfg WorkloadConfig) []JobOption { + var opts []JobOption + + // Add annotations + if len(cfg.Annotations) > 0 { + opts = append(opts, WithJobAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithJobConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithJobSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithJobConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithJobSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithJobProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + return opts +} + +// WithJobConfigMapVolume adds a volume mount for a ConfigMap to a Job. +func WithJobConfigMapVolume(name string) JobOption { + return func(j *batchv1.Job) { + volumeName := "cm-" + name + j.Spec.Template.Spec.Volumes = append( + j.Spec.Template.Spec.Volumes, + corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }, + ) + j.Spec.Template.Spec.Containers[0].VolumeMounts = append( + j.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: "/etc/config/" + name, + }, + ) + } +} + +// WithJobSecretVolume adds a volume mount for a Secret to a Job. 
+func WithJobSecretVolume(name string) JobOption {
+	return func(j *batchv1.Job) {
+		volumeName := "secret-" + name
+		j.Spec.Template.Spec.Volumes = append(
+			j.Spec.Template.Spec.Volumes,
+			corev1.Volume{
+				Name: volumeName,
+				VolumeSource: corev1.VolumeSource{
+					Secret: &corev1.SecretVolumeSource{
+						SecretName: name,
+					},
+				},
+			},
+		)
+		j.Spec.Template.Spec.Containers[0].VolumeMounts = append(
+			j.Spec.Template.Spec.Containers[0].VolumeMounts,
+			corev1.VolumeMount{
+				Name:      volumeName,
+				MountPath: "/etc/secrets/" + name,
+			},
+		)
+	}
+}
+
+// WithJobProjectedVolume adds a projected volume named "projected-config",
+// combining the given ConfigMap and/or Secret sources, mounted at
+// /etc/projected in the Job's first container. Empty names are skipped, so
+// the volume may carry one source or both.
+func WithJobProjectedVolume(cmName, secretName string) JobOption {
+	return func(j *batchv1.Job) {
+		volumeName := "projected-config"
+		sources := []corev1.VolumeProjection{}
+
+		if cmName != "" {
+			sources = append(sources, corev1.VolumeProjection{
+				ConfigMap: &corev1.ConfigMapProjection{
+					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
+				},
+			})
+		}
+		if secretName != "" {
+			sources = append(sources, corev1.VolumeProjection{
+				Secret: &corev1.SecretProjection{
+					LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
+				},
+			})
+		}
+
+		j.Spec.Template.Spec.Volumes = append(
+			j.Spec.Template.Spec.Volumes,
+			corev1.Volume{
+				Name: volumeName,
+				VolumeSource: corev1.VolumeSource{
+					Projected: &corev1.ProjectedVolumeSource{
+						Sources: sources,
+					},
+				},
+			},
+		)
+		j.Spec.Template.Spec.Containers[0].VolumeMounts = append(
+			j.Spec.Template.Spec.Containers[0].VolumeMounts,
+			corev1.VolumeMount{
+				Name:      volumeName,
+				MountPath: "/etc/projected",
+			},
+		)
+	}
+}
diff --git a/test/e2e/utils/workload_openshift.go b/test/e2e/utils/workload_openshift.go
new file mode 100644
index 0000000..e4e2455
--- /dev/null
+++ b/test/e2e/utils/workload_openshift.go
@@ -0,0 +1,340 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/dynamic"
+)
+
+// DeploymentConfigAdapter implements WorkloadAdapter for OpenShift
+// DeploymentConfigs. DeploymentConfigs are not registered in the typed
+// client scheme, so all access goes through the dynamic client.
+type DeploymentConfigAdapter struct {
+	dynamicClient dynamic.Interface
+}
+
+// NewDeploymentConfigAdapter creates a new DeploymentConfigAdapter backed by
+// the given dynamic client.
+func NewDeploymentConfigAdapter(dynamicClient dynamic.Interface) *DeploymentConfigAdapter {
+	return &DeploymentConfigAdapter{dynamicClient: dynamicClient}
+}
+
+// Type returns the workload type.
+func (a *DeploymentConfigAdapter) Type() WorkloadType {
+	return WorkloadDeploymentConfig
+}
+
+// Create creates a DeploymentConfig with the given config.
+func (a *DeploymentConfigAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
+	opts := buildDCOptions(cfg)
+	return CreateDeploymentConfig(ctx, a.dynamicClient, namespace, name, opts...)
+}
+
+// Delete removes the DeploymentConfig.
+func (a *DeploymentConfigAdapter) Delete(ctx context.Context, namespace, name string) error {
+	return DeleteDeploymentConfig(ctx, a.dynamicClient, namespace, name)
+}
+
+// WaitReady waits for the DeploymentConfig to be ready.
+func (a *DeploymentConfigAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
+	return WaitForDeploymentConfigReady(ctx, a.dynamicClient, namespace, name, timeout)
+}
+
+// WaitReloaded waits for the DeploymentConfig to have the reload annotation.
+func (a *DeploymentConfigAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
+	return WaitForDeploymentConfigReloaded(ctx, a.dynamicClient, namespace, name, annotationKey, timeout)
+}
+
+// WaitEnvVar waits for the DeploymentConfig to have a STAKATER_ env var.
+func (a *DeploymentConfigAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
+	return WaitForDeploymentConfigEnvVar(ctx, a.dynamicClient, namespace, name, prefix, timeout)
+}
+
+// SupportsEnvVarStrategy returns true as DeploymentConfigs support env var reload strategy.
+func (a *DeploymentConfigAdapter) SupportsEnvVarStrategy() bool {
+	return true
+}
+
+// RequiresSpecialHandling returns false as DeploymentConfigs use standard rolling restart.
+func (a *DeploymentConfigAdapter) RequiresSpecialHandling() bool {
+	return false
+}
+
+// buildDCOptions converts WorkloadConfig to DCOption slice.
+// Options are appended in a fixed order: annotations, envFrom, volumes,
+// projected volume, valueFrom key refs, then init containers. For key refs,
+// an empty key defaults to "key" and an empty env var name defaults to
+// CONFIG_VAR (ConfigMap) or SECRET_VAR (Secret).
+func buildDCOptions(cfg WorkloadConfig) []DCOption {
+	var opts []DCOption
+
+	// Add annotations (to pod template)
+	if len(cfg.Annotations) > 0 {
+		opts = append(opts, WithDCAnnotations(cfg.Annotations))
+	}
+
+	// Add envFrom references
+	if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" {
+		opts = append(opts, WithDCConfigMapEnvFrom(cfg.ConfigMapName))
+	}
+	if cfg.UseSecretEnvFrom && cfg.SecretName != "" {
+		opts = append(opts, WithDCSecretEnvFrom(cfg.SecretName))
+	}
+
+	// Add volume mounts
+	if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" {
+		opts = append(opts, WithDCConfigMapVolume(cfg.ConfigMapName))
+	}
+	if cfg.UseSecretVolume && cfg.SecretName != "" {
+		opts = append(opts, WithDCSecretVolume(cfg.SecretName))
+	}
+
+	// Add projected volume
+	if cfg.UseProjectedVolume {
+		opts = append(opts, WithDCProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
+	}
+
+	// Add valueFrom references
+	if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" {
+		key := cfg.ConfigMapKey
+		if key == "" {
+			key = "key"
+		}
+		envVar := cfg.EnvVarName
+		if envVar == "" {
+			envVar = "CONFIG_VAR"
+		}
+		opts = append(opts, WithDCConfigMapKeyRef(cfg.ConfigMapName, key, envVar))
+	}
+	if cfg.UseSecretKeyRef && cfg.SecretName != "" {
+		key := cfg.SecretKey
+		if key == "" {
+			key = "key"
+		}
+		envVar := cfg.EnvVarName
+		if envVar == "" {
+			envVar = "SECRET_VAR"
+		}
+		opts = append(opts, WithDCSecretKeyRef(cfg.SecretName, key, envVar))
+	}
+
+	// Add init container with envFrom
+	if cfg.UseInitContainer {
+		opts = append(opts, WithDCInitContainer(cfg.ConfigMapName, cfg.SecretName))
+	}
+
+	// Add init container with volume mount
+	if cfg.UseInitContainerVolume {
+		opts = append(opts, WithDCInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
+	}
+
+	return opts
+}
+
+// WithDCProjectedVolume adds a projected volume with ConfigMap and/or Secret
+// sources to a DeploymentConfig, mounted at /etc/projected in the first
+// container. Lookup errors from NestedSlice are deliberately ignored: a
+// missing field simply yields an empty slice, which is the desired start.
+func WithDCProjectedVolume(cmName, secretName string) DCOption {
+	return func(dc *unstructured.Unstructured) {
+		volumeName := "projected-config"
+		sources := []interface{}{}
+
+		if cmName != "" {
+			sources = append(sources, map[string]interface{}{
+				"configMap": map[string]interface{}{
+					"name": cmName,
+				},
+			})
+		}
+		if secretName != "" {
+			sources = append(sources, map[string]interface{}{
+				"secret": map[string]interface{}{
+					"name": secretName,
+				},
+			})
+		}
+
+		// Add volume
+		volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes")
+		volumes = append(volumes, map[string]interface{}{
+			"name": volumeName,
+			"projected": map[string]interface{}{
+				"sources": sources,
+			},
+		})
+		_ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes")
+
+		// Add volumeMount to the first container only
+		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
+		if len(containers) > 0 {
+			container := containers[0].(map[string]interface{})
+			volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts")
+			volumeMounts = append(volumeMounts, map[string]interface{}{
+				"name":      volumeName,
+				"mountPath": "/etc/projected",
+			})
+			container["volumeMounts"] = volumeMounts
+			containers[0] = container
+			_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
+		}
+	}
+}
+
+// WithDCConfigMapKeyRef adds an env var with valueFrom.configMapKeyRef to a DeploymentConfig.
+func WithDCConfigMapKeyRef(cmName, key, envVarName string) DCOption {
+	return func(dc *unstructured.Unstructured) {
+		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
+		if len(containers) > 0 {
+			container := containers[0].(map[string]interface{})
+			env, _, _ := unstructured.NestedSlice(container, "env")
+			env = append(env, map[string]interface{}{
+				"name": envVarName,
+				"valueFrom": map[string]interface{}{
+					"configMapKeyRef": map[string]interface{}{
+						"name": cmName,
+						"key":  key,
+					},
+				},
+			})
+			container["env"] = env
+			containers[0] = container
+			_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
+		}
+	}
+}
+
+// WithDCSecretKeyRef adds an env var with valueFrom.secretKeyRef to a DeploymentConfig.
+func WithDCSecretKeyRef(secretName, key, envVarName string) DCOption {
+	return func(dc *unstructured.Unstructured) {
+		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
+		if len(containers) > 0 {
+			container := containers[0].(map[string]interface{})
+			env, _, _ := unstructured.NestedSlice(container, "env")
+			env = append(env, map[string]interface{}{
+				"name": envVarName,
+				"valueFrom": map[string]interface{}{
+					"secretKeyRef": map[string]interface{}{
+						"name": secretName,
+						"key":  key,
+					},
+				},
+			})
+			container["env"] = env
+			containers[0] = container
+			_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
+		}
+	}
+}
+
+// WithDCInitContainer adds an init container that references ConfigMap and/or Secret via envFrom.
+func WithDCInitContainer(cmName, secretName string) DCOption { + return func(dc *unstructured.Unstructured) { + initContainer := map[string]interface{}{ + "name": "init", + "image": DefaultImage, + "command": []interface{}{"sh", "-c", "echo init done"}, + } + + envFrom := []interface{}{} + if cmName != "" { + envFrom = append(envFrom, map[string]interface{}{ + "configMapRef": map[string]interface{}{ + "name": cmName, + }, + }) + } + if secretName != "" { + envFrom = append(envFrom, map[string]interface{}{ + "secretRef": map[string]interface{}{ + "name": secretName, + }, + }) + } + if len(envFrom) > 0 { + initContainer["envFrom"] = envFrom + } + + initContainers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "initContainers") + initContainers = append(initContainers, initContainer) + _ = unstructured.SetNestedSlice(dc.Object, initContainers, "spec", "template", "spec", "initContainers") + } +} + +// WithDCInitContainerVolume adds an init container with ConfigMap/Secret volume mounts to a DeploymentConfig. 
+func WithDCInitContainerVolume(cmName, secretName string) DCOption { + return func(dc *unstructured.Unstructured) { + initContainer := map[string]interface{}{ + "name": "init", + "image": DefaultImage, + "command": []interface{}{"sh", "-c", "echo init done"}, + } + + volumeMounts := []interface{}{} + volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes") + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + volumes = append(volumes, map[string]interface{}{ + "name": volumeName, + "configMap": map[string]interface{}{ + "name": cmName, + }, + }) + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": volumeName, + "mountPath": fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + volumes = append(volumes, map[string]interface{}{ + "name": volumeName, + "secret": map[string]interface{}{ + "secretName": secretName, + }, + }) + volumeMounts = append(volumeMounts, map[string]interface{}{ + "name": volumeName, + "mountPath": fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + if len(volumeMounts) > 0 { + initContainer["volumeMounts"] = volumeMounts + } + + _ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes") + + initContainers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "initContainers") + initContainers = append(initContainers, initContainer) + _ = unstructured.SetNestedSlice(dc.Object, initContainers, "spec", "template", "spec", "initContainers") + } +} + +// WaitForDeploymentConfigEnvVar waits for a DeploymentConfig's container to have an env var with the given prefix. 
+func WaitForDeploymentConfigEnvVar(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) { + var found bool + err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) { + dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers") + for _, c := range containers { + container := c.(map[string]interface{}) + env, _, _ := unstructured.NestedSlice(container, "env") + for _, e := range env { + envVar := e.(map[string]interface{}) + if envName, ok := envVar["name"].(string); ok && strings.HasPrefix(envName, prefix) { + found = true + return true, nil + } + } + } + + return false, nil + }) + + if err != nil && err != context.DeadlineExceeded { + return false, err + } + return found, nil +} diff --git a/test/e2e/utils/workload_statefulset.go b/test/e2e/utils/workload_statefulset.go new file mode 100644 index 0000000..fb20914 --- /dev/null +++ b/test/e2e/utils/workload_statefulset.go @@ -0,0 +1,246 @@ +package utils + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" +) + +// StatefulSetAdapter implements WorkloadAdapter for Kubernetes StatefulSets. +type StatefulSetAdapter struct { + client kubernetes.Interface +} + +// NewStatefulSetAdapter creates a new StatefulSetAdapter. +func NewStatefulSetAdapter(client kubernetes.Interface) *StatefulSetAdapter { + return &StatefulSetAdapter{client: client} +} + +// Type returns the workload type. +func (a *StatefulSetAdapter) Type() WorkloadType { + return WorkloadStatefulSet +} + +// Create creates a StatefulSet with the given config. 
+func (a *StatefulSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error { + opts := buildStatefulSetOptions(cfg) + _, err := CreateStatefulSet(ctx, a.client, namespace, name, opts...) + return err +} + +// Delete removes the StatefulSet. +func (a *StatefulSetAdapter) Delete(ctx context.Context, namespace, name string) error { + return DeleteStatefulSet(ctx, a.client, namespace, name) +} + +// WaitReady waits for the StatefulSet to be ready. +func (a *StatefulSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error { + return WaitForStatefulSetReady(ctx, a.client, namespace, name, timeout) +} + +// WaitReloaded waits for the StatefulSet to have the reload annotation. +func (a *StatefulSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) { + return WaitForStatefulSetReloaded(ctx, a.client, namespace, name, annotationKey, timeout) +} + +// WaitEnvVar waits for the StatefulSet to have a STAKATER_ env var. +func (a *StatefulSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) { + return WaitForStatefulSetEnvVar(ctx, a.client, namespace, name, prefix, timeout) +} + +// SupportsEnvVarStrategy returns true as StatefulSets support env var reload strategy. +func (a *StatefulSetAdapter) SupportsEnvVarStrategy() bool { + return true +} + +// RequiresSpecialHandling returns false as StatefulSets use standard rolling restart. +func (a *StatefulSetAdapter) RequiresSpecialHandling() bool { + return false +} + +// buildStatefulSetOptions converts WorkloadConfig to StatefulSetOption slice. 
+func buildStatefulSetOptions(cfg WorkloadConfig) []StatefulSetOption { + var opts []StatefulSetOption + + // Add annotations + if len(cfg.Annotations) > 0 { + opts = append(opts, WithStatefulSetAnnotations(cfg.Annotations)) + } + + // Add envFrom references + if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" { + opts = append(opts, WithStatefulSetConfigMapEnvFrom(cfg.ConfigMapName)) + } + if cfg.UseSecretEnvFrom && cfg.SecretName != "" { + opts = append(opts, WithStatefulSetSecretEnvFrom(cfg.SecretName)) + } + + // Add volume mounts + if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" { + opts = append(opts, WithStatefulSetConfigMapVolume(cfg.ConfigMapName)) + } + if cfg.UseSecretVolume && cfg.SecretName != "" { + opts = append(opts, WithStatefulSetSecretVolume(cfg.SecretName)) + } + + // Add projected volume + if cfg.UseProjectedVolume { + opts = append(opts, WithStatefulSetProjectedVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add valueFrom references + if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" { + key := cfg.ConfigMapKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "CONFIG_VAR" + } + opts = append(opts, WithStatefulSetConfigMapKeyRef(cfg.ConfigMapName, key, envVar)) + } + if cfg.UseSecretKeyRef && cfg.SecretName != "" { + key := cfg.SecretKey + if key == "" { + key = "key" + } + envVar := cfg.EnvVarName + if envVar == "" { + envVar = "SECRET_VAR" + } + opts = append(opts, WithStatefulSetSecretKeyRef(cfg.SecretName, key, envVar)) + } + + // Add init container with envFrom + if cfg.UseInitContainer { + opts = append(opts, WithStatefulSetInitContainer(cfg.ConfigMapName, cfg.SecretName)) + } + + // Add init container with volume mount + if cfg.UseInitContainerVolume { + opts = append(opts, WithStatefulSetInitContainerVolume(cfg.ConfigMapName, cfg.SecretName)) + } + + return opts +} + +// WithStatefulSetConfigMapVolume adds a volume mount for a ConfigMap to a StatefulSet. 
+func WithStatefulSetConfigMapVolume(name string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + volumeName := fmt.Sprintf("cm-%s", name) + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: name}, + }, + }, + }) + ss.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ss.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/config/%s", name), + }, + ) + } +} + +// WithStatefulSetSecretVolume adds a volume mount for a Secret to a StatefulSet. +func WithStatefulSetSecretVolume(name string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + volumeName := fmt.Sprintf("secret-%s", name) + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + }) + ss.Spec.Template.Spec.Containers[0].VolumeMounts = append( + ss.Spec.Template.Spec.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/secrets/%s", name), + }, + ) + } +} + +// WithStatefulSetInitContainer adds an init container that references ConfigMap and/or Secret. 
+func WithStatefulSetInitContainer(cmName, secretName string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }) + } + if secretName != "" { + initContainer.EnvFrom = append(initContainer.EnvFrom, corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + }, + }) + } + + ss.Spec.Template.Spec.InitContainers = append(ss.Spec.Template.Spec.InitContainers, initContainer) + } +} + +// WithStatefulSetInitContainerVolume adds an init container with ConfigMap/Secret volume mounts. +func WithStatefulSetInitContainerVolume(cmName, secretName string) StatefulSetOption { + return func(ss *appsv1.StatefulSet) { + initContainer := corev1.Container{ + Name: "init", + Image: DefaultImage, + Command: []string{"sh", "-c", "echo init done"}, + } + + if cmName != "" { + volumeName := fmt.Sprintf("init-cm-%s", cmName) + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cmName}, + }, + }, + }) + initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-config/%s", cmName), + }) + } + if secretName != "" { + volumeName := fmt.Sprintf("init-secret-%s", secretName) + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }) + 
initContainer.VolumeMounts = append(initContainer.VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName), + }) + } + + ss.Spec.Template.Spec.InitContainers = append(ss.Spec.Template.Spec.InitContainers, initContainer) + } +}