feat: Initial e2e tests and migrate old ones into e2e

This commit is contained in:
TheiLLeniumStudios
2026-01-08 11:06:45 +01:00
parent fdd2474b3f
commit fafd5460a2
63 changed files with 14035 additions and 7116 deletions

View File

@@ -14,6 +14,9 @@ DOCKER_IMAGE ?= ghcr.io/stakater/reloader
# Default value "dev"
VERSION ?= 0.0.1
# Full image reference (used for docker-build)
IMG ?= $(DOCKER_IMAGE):v$(VERSION)
REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
BUILD=
@@ -140,7 +143,63 @@ manifest:
docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
test:
"$(GOCMD)" test -timeout 1800s -v ./...
"$(GOCMD)" test -timeout 1800s -v -short ./internal/... ./test/e2e/utils/...
##@ E2E Tests
E2E_IMG ?= ghcr.io/stakater/reloader:test
E2E_TIMEOUT ?= 45m
KIND_CLUSTER ?= kind
# Detect container runtime (docker or podman)
CONTAINER_RUNTIME ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)
.PHONY: e2e-build
e2e-build: ## Build container image for e2e testing (uses docker or podman)
$(CONTAINER_RUNTIME) build -t $(E2E_IMG) -f Dockerfile .
.PHONY: e2e-load
e2e-load: ## Load e2e image to Kind cluster (handles both docker and podman)
ifeq ($(notdir $(CONTAINER_RUNTIME)),podman)
@echo "Using podman: loading via image-archive..."
$(CONTAINER_RUNTIME) save $(E2E_IMG) -o /tmp/reloader-e2e.tar
kind load image-archive /tmp/reloader-e2e.tar --name $(KIND_CLUSTER)
rm -f /tmp/reloader-e2e.tar
else
kind load docker-image $(E2E_IMG) --name $(KIND_CLUSTER)
endif
.PHONY: e2e-setup
e2e-setup: e2e-build e2e-load ## Build image and load to Kind (run once before tests)
@echo "E2E setup complete. Image $(E2E_IMG) loaded to Kind cluster $(KIND_CLUSTER)"
.PHONY: e2e-cluster-setup
e2e-cluster-setup: ## Setup e2e cluster prerequisites (Argo Rollouts, etc.)
./scripts/e2e-cluster-setup.sh
.PHONY: e2e-cluster-cleanup
e2e-cluster-cleanup: ## Cleanup e2e cluster resources (Argo Rollouts, test namespaces, etc.)
./scripts/e2e-cluster-cleanup.sh
.PHONY: e2e
e2e: e2e-setup e2e-cluster-setup ## Run all e2e tests (builds image, loads to Kind, sets up cluster, runs tests)
SKIP_BUILD=true RELOADER_IMAGE=$(E2E_IMG) "$(GOCMD)" test -v -count=1 -p 1 -timeout $(E2E_TIMEOUT) ./test/e2e/...
@echo "E2E tests complete. Run 'make e2e-cluster-cleanup' to cleanup cluster resources."
.PHONY: e2e-kind-create
e2e-kind-create: ## Create Kind cluster for e2e tests
kind create cluster --name $(KIND_CLUSTER) || true
.PHONY: e2e-ci
e2e-ci: e2e-kind-create e2e e2e-cluster-cleanup ## Full CI pipeline: create Kind cluster, build, load, run tests, cleanup
.PHONY: e2e-kind-delete
e2e-kind-delete: ## Delete Kind cluster used for e2e tests
kind delete cluster --name $(KIND_CLUSTER)
.PHONY: docker-build
docker-build: ## Build Docker image
$(CONTAINER_RUNTIME) build -t $(IMG) -f Dockerfile .
stop:
@docker stop "${BINARY}"

8
go.mod
View File

@@ -4,7 +4,8 @@ go 1.25.5
require (
github.com/argoproj/argo-rollouts v1.8.3
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.35.1
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2
github.com/parnurzeal/gorequest v0.3.0
github.com/prometheus/client_golang v1.22.0
@@ -29,21 +30,23 @@ require (
github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/moul/http2curl v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
@@ -58,6 +61,7 @@ require (
golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,358 @@
package handler
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestResourceCreatedHandler_GetConfig verifies that GetConfig derives the
// resource name, namespace, env-var type postfix, and SHA value for ConfigMap
// and Secret inputs, and yields a zero-value config for unsupported resource
// types. The returned old SHA must always be empty, because a create event
// has no prior state to compare against.
func TestResourceCreatedHandler_GetConfig(t *testing.T) {
	tests := []struct {
		name              string
		resource          interface{} // value handed to the handler; may be a non-Kubernetes type
		expectedName      string
		expectedNS        string
		expectedType      string
		expectSHANotEmpty bool // true when a SHA should be computed (valid ConfigMap/Secret)
		expectOldSHAEmpty bool // always true here: create events carry no old SHA
	}{
		{
			name: "ConfigMap with data",
			resource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-configmap",
					Namespace: "test-ns",
				},
				Data: map[string]string{
					"key1": "value1",
					"key2": "value2",
				},
			},
			expectedName:      "my-configmap",
			expectedNS:        "test-ns",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			// Even an empty data map hashes to a non-empty SHA.
			name: "ConfigMap with empty data",
			resource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "empty-configmap",
					Namespace: "default",
				},
				Data: map[string]string{},
			},
			expectedName:      "empty-configmap",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			// BinaryData must be hashed just like Data.
			name: "ConfigMap with binary data",
			resource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "binary-configmap",
					Namespace: "default",
				},
				BinaryData: map[string][]byte{
					"binary-key": []byte("binary-value"),
				},
			},
			expectedName:      "binary-configmap",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			name: "ConfigMap with annotations",
			resource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "annotated-configmap",
					Namespace: "default",
					Annotations: map[string]string{
						"reloader.stakater.com/match": "true",
					},
				},
				Data: map[string]string{"key": "value"},
			},
			expectedName:      "annotated-configmap",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			name: "Secret with data",
			resource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-secret",
					Namespace: "secret-ns",
				},
				Data: map[string][]byte{
					"password": []byte("secret-password"),
				},
			},
			expectedName:      "my-secret",
			expectedNS:        "secret-ns",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			name: "Secret with empty data",
			resource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "empty-secret",
					Namespace: "default",
				},
				Data: map[string][]byte{},
			},
			expectedName:      "empty-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			// StringData (write-only convenience field) should also contribute.
			name: "Secret with StringData",
			resource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "stringdata-secret",
					Namespace: "default",
				},
				StringData: map[string]string{
					"username": "admin",
				},
			},
			expectedName:      "stringdata-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		{
			name: "Secret with labels",
			resource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "labeled-secret",
					Namespace: "default",
					Labels: map[string]string{
						"app": "test",
					},
				},
				Data: map[string][]byte{"key": []byte("value")},
			},
			expectedName:      "labeled-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectOldSHAEmpty: true,
		},
		// Unsupported resource kinds: all config fields stay at their zero
		// values and no SHA is produced.
		{
			name:              "Invalid resource type - string",
			resource:          "invalid-string",
			expectedName:      "",
			expectedNS:        "",
			expectedType:      "",
			expectSHANotEmpty: false,
			expectOldSHAEmpty: true,
		},
		{
			name:              "Invalid resource type - int",
			resource:          123,
			expectedName:      "",
			expectedNS:        "",
			expectedType:      "",
			expectSHANotEmpty: false,
			expectOldSHAEmpty: true,
		},
		{
			name:              "Invalid resource type - struct",
			resource:          struct{ Name string }{Name: "test"},
			expectedName:      "",
			expectedNS:        "",
			expectedType:      "",
			expectSHANotEmpty: false,
			expectOldSHAEmpty: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := ResourceCreatedHandler{
				Resource:   tt.resource,
				Collectors: metrics.NewCollectors(),
			}
			config, oldSHA := handler.GetConfig()
			assert.Equal(t, tt.expectedName, config.ResourceName)
			assert.Equal(t, tt.expectedNS, config.Namespace)
			assert.Equal(t, tt.expectedType, config.Type)
			if tt.expectSHANotEmpty {
				assert.NotEmpty(t, config.SHAValue, "SHA should not be empty")
			}
			if tt.expectOldSHAEmpty {
				assert.Empty(t, oldSHA, "oldSHA should always be empty for create handler")
			}
		})
	}
}
func TestResourceCreatedHandler_GetConfig_Annotations(t *testing.T) {
// Test that annotations are properly captured in config
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "annotated-cm",
Namespace: "default",
Annotations: map[string]string{
"reloader.stakater.com/match": "true",
"reloader.stakater.com/search": "true",
},
},
Data: map[string]string{"key": "value"},
}
handler := ResourceCreatedHandler{
Resource: cm,
Collectors: metrics.NewCollectors(),
}
config, _ := handler.GetConfig()
assert.NotNil(t, config.ResourceAnnotations)
assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/match"])
assert.Equal(t, "true", config.ResourceAnnotations["reloader.stakater.com/search"])
}
func TestResourceCreatedHandler_GetConfig_Labels(t *testing.T) {
// Test that labels are properly captured in config
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "labeled-secret",
Namespace: "default",
Labels: map[string]string{
"app": "myapp",
"version": "v1",
},
},
Data: map[string][]byte{"key": []byte("value")},
}
handler := ResourceCreatedHandler{
Resource: secret,
Collectors: metrics.NewCollectors(),
}
config, _ := handler.GetConfig()
assert.NotNil(t, config.Labels)
assert.Equal(t, "myapp", config.Labels["app"])
assert.Equal(t, "v1", config.Labels["version"])
}
// TestResourceCreatedHandler_Handle exercises Handle for nil and valid
// resources. A nil resource is tolerated (the handler only logs the problem
// and returns nil), and valid resources succeed when no workloads reference
// them.
func TestResourceCreatedHandler_Handle(t *testing.T) {
	tests := []struct {
		name        string
		resource    interface{}
		expectError bool
	}{
		{
			name:        "Nil resource",
			resource:    nil,
			expectError: false, // logs error but returns nil
		},
		{
			name: "Valid ConfigMap - no workloads to update",
			resource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cm",
					Namespace: "default",
				},
				Data: map[string]string{"key": "value"},
			},
			expectError: false,
		},
		{
			name: "Valid Secret - no workloads to update",
			resource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-secret",
					Namespace: "default",
				},
				Data: map[string][]byte{"key": []byte("value")},
			},
			expectError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := ResourceCreatedHandler{
				Resource:   tt.resource,
				Collectors: metrics.NewCollectors(),
			}
			err := handler.Handle()
			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
func TestResourceCreatedHandler_SHAConsistency(t *testing.T) {
// Test that same data produces same SHA
data := map[string]string{"key": "value"}
cm1 := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "default"},
Data: data,
}
cm2 := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "default"},
Data: data,
}
handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()}
handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()}
config1, _ := handler1.GetConfig()
config2, _ := handler2.GetConfig()
// Same data should produce same SHA
assert.Equal(t, config1.SHAValue, config2.SHAValue)
}
func TestResourceCreatedHandler_SHADifference(t *testing.T) {
// Test that different data produces different SHA
cm1 := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"key": "value1"},
}
cm2 := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"key": "value2"},
}
handler1 := ResourceCreatedHandler{Resource: cm1, Collectors: metrics.NewCollectors()}
handler2 := ResourceCreatedHandler{Resource: cm2, Collectors: metrics.NewCollectors()}
config1, _ := handler1.GetConfig()
config2, _ := handler2.GetConfig()
// Different data should produce different SHA
assert.NotEqual(t, config1.SHAValue, config2.SHAValue)
}

View File

@@ -0,0 +1,356 @@
package handler
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/pkg/common"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// mockDeploymentForDelete creates a deployment with containers for testing delete strategies
// mockDeploymentForDelete builds a minimal Deployment whose pod template
// carries the supplied containers and volumes, plus an empty (non-nil)
// annotation map so delete-strategy code can read and write annotations.
func mockDeploymentForDelete(name, namespace string, containers []v1.Container, volumes []v1.Volume) *appsv1.Deployment {
	podTemplate := v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{},
		},
		Spec: v1.PodSpec{
			Containers: containers,
			Volumes:    volumes,
		},
	}
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{Template: podTemplate},
	}
}
// Mock funcs for testing
// mockContainersFunc returns the pod template containers of item when it is a
// *appsv1.Deployment, or nil for any other type.
func mockContainersFunc(item runtime.Object) []v1.Container {
	if d, ok := item.(*appsv1.Deployment); ok {
		return d.Spec.Template.Spec.Containers
	}
	return nil
}
// mockInitContainersFunc returns the pod template init containers of item when
// it is a *appsv1.Deployment, or nil for any other type.
func mockInitContainersFunc(item runtime.Object) []v1.Container {
	if d, ok := item.(*appsv1.Deployment); ok {
		return d.Spec.Template.Spec.InitContainers
	}
	return nil
}
// mockVolumesFunc returns the pod template volumes of item when it is a
// *appsv1.Deployment, or nil for any other type.
func mockVolumesFunc(item runtime.Object) []v1.Volume {
	if d, ok := item.(*appsv1.Deployment); ok {
		return d.Spec.Template.Spec.Volumes
	}
	return nil
}
// mockPodAnnotationsFunc returns the pod template annotations of item when it
// is a *appsv1.Deployment, or nil for any other type.
func mockPodAnnotationsFunc(item runtime.Object) map[string]string {
	if d, ok := item.(*appsv1.Deployment); ok {
		return d.Spec.Template.Annotations
	}
	return nil
}
// mockPatchTemplatesFunc supplies the patch templates the reload strategies
// format with fmt verbs: strategic-merge templates for pod annotations and
// container env vars, and a JSON-patch template that removes an env var by
// container index and env index.
func mockPatchTemplatesFunc() callbacks.PatchTemplates {
	return callbacks.PatchTemplates{
		AnnotationTemplate:   `{"spec":{"template":{"metadata":{"annotations":{"%s":"%s"}}}}}`,
		EnvVarTemplate:       `{"spec":{"template":{"spec":{"containers":[{"name":"%s","env":[{"name":"%s","value":"%s"}]}]}}}}`,
		DeleteEnvVarTemplate: `[{"op":"remove","path":"/spec/template/spec/containers/%d/env/%d"}]`,
	}
}
// TestRemoveContainerEnvVars covers the env-vars reload strategy cleanup path:
// when a watched ConfigMap/Secret is deleted, the STAKATER_* tracking env var
// previously injected into referencing containers must be removed.
func TestRemoveContainerEnvVars(t *testing.T) {
	tests := []struct {
		name          string
		containers    []v1.Container
		volumes       []v1.Volume
		config        common.Config
		autoReload    bool
		expected      constants.Result
		envVarRemoved bool // whether the tracking env var should be gone afterwards
	}{
		{
			name: "Remove existing env var - configmap envFrom",
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							ConfigMapRef: &v1.ConfigMapEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-configmap",
								},
							},
						},
					},
					Env: []v1.EnvVar{
						{Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"},
					},
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
			},
			autoReload:    true,
			expected:      constants.Updated,
			envVarRemoved: true,
		},
		{
			// Container references the configmap but carries no tracking env
			// var, so there is nothing to remove.
			name: "No env var to remove",
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							ConfigMapRef: &v1.ConfigMapEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-configmap",
								},
							},
						},
					},
					Env: []v1.EnvVar{}, // No env vars
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
			},
			autoReload:    true,
			expected:      constants.NotUpdated,
			envVarRemoved: false,
		},
		{
			name: "Remove existing env var - secret envFrom",
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							SecretRef: &v1.SecretEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-secret",
								},
							},
						},
					},
					Env: []v1.EnvVar{
						{Name: "STAKATER_MY_SECRET_SECRET", Value: "sha-value"},
					},
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-secret",
				Type:         constants.SecretEnvVarPostfix,
			},
			autoReload:    true,
			expected:      constants.Updated,
			envVarRemoved: true,
		},
		{
			// No container references the resource at all.
			name:       "No container found",
			containers: []v1.Container{},
			volumes:    []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
			},
			autoReload:    true,
			expected:      constants.NoContainerFound,
			envVarRemoved: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes)
			funcs := callbacks.RollingUpgradeFuncs{
				ContainersFunc:     mockContainersFunc,
				InitContainersFunc: mockInitContainersFunc,
				VolumesFunc:        mockVolumesFunc,
				PodAnnotationsFunc: mockPodAnnotationsFunc,
				PatchTemplatesFunc: mockPatchTemplatesFunc,
				SupportsPatch:      true,
			}
			result := removeContainerEnvVars(funcs, deployment, tt.config, tt.autoReload)
			assert.Equal(t, tt.expected, result.Result)
			if tt.envVarRemoved {
				// Verify env var was removed from container
				containers := deployment.Spec.Template.Spec.Containers
				for _, c := range containers {
					for _, env := range c.Env {
						envVarName := getEnvVarName(tt.config.ResourceName, tt.config.Type)
						assert.NotEqual(t, envVarName, env.Name, "Env var should have been removed")
					}
				}
			}
		})
	}
}
// TestInvokeDeleteStrategy checks that invokeDeleteStrategy dispatches to the
// configured reload strategy (annotations vs. env-vars) without failing. It
// mutates the global options.ReloadStrategy, so the original value is saved
// up front and restored via defer.
func TestInvokeDeleteStrategy(t *testing.T) {
	// Save original strategy and restore after test
	originalStrategy := options.ReloadStrategy
	defer func() {
		options.ReloadStrategy = originalStrategy
	}()
	tests := []struct {
		name           string
		reloadStrategy string
		containers     []v1.Container
		volumes        []v1.Volume
		config         common.Config
	}{
		{
			name:           "Annotations strategy",
			reloadStrategy: constants.AnnotationsReloadStrategy,
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							ConfigMapRef: &v1.ConfigMapEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-configmap",
								},
							},
						},
					},
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
				SHAValue:     "sha-value",
			},
		},
		{
			// The env-vars strategy should find and remove the injected
			// STAKATER_* tracking variable.
			name:           "EnvVars strategy",
			reloadStrategy: constants.EnvVarsReloadStrategy,
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							ConfigMapRef: &v1.ConfigMapEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-configmap",
								},
							},
						},
					},
					Env: []v1.EnvVar{
						{Name: "STAKATER_MY_CONFIGMAP_CONFIGMAP", Value: "sha-value"},
					},
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			options.ReloadStrategy = tt.reloadStrategy
			deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes)
			funcs := callbacks.RollingUpgradeFuncs{
				ContainersFunc:     mockContainersFunc,
				InitContainersFunc: mockInitContainersFunc,
				VolumesFunc:        mockVolumesFunc,
				PodAnnotationsFunc: mockPodAnnotationsFunc,
				PatchTemplatesFunc: mockPatchTemplatesFunc,
				SupportsPatch:      true,
			}
			result := invokeDeleteStrategy(funcs, deployment, tt.config, true)
			// Should return a valid result
			assert.NotNil(t, result)
		})
	}
}
// TestRemovePodAnnotations covers the annotations reload strategy cleanup
// path: on resource deletion the pod-template annotation hash is reset, which
// the handler reports as an update.
func TestRemovePodAnnotations(t *testing.T) {
	tests := []struct {
		name       string
		containers []v1.Container
		volumes    []v1.Volume
		config     common.Config
	}{
		{
			name: "Remove pod annotations - configmap",
			containers: []v1.Container{
				{
					Name: "app",
					EnvFrom: []v1.EnvFromSource{
						{
							ConfigMapRef: &v1.ConfigMapEnvSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: "my-configmap",
								},
							},
						},
					},
				},
			},
			volumes: []v1.Volume{},
			config: common.Config{
				ResourceName: "my-configmap",
				Type:         constants.ConfigmapEnvVarPostfix,
				SHAValue:     "sha-value",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			deployment := mockDeploymentForDelete("test-deploy", "default", tt.containers, tt.volumes)
			funcs := callbacks.RollingUpgradeFuncs{
				ContainersFunc:     mockContainersFunc,
				InitContainersFunc: mockInitContainersFunc,
				VolumesFunc:        mockVolumesFunc,
				PodAnnotationsFunc: mockPodAnnotationsFunc,
				PatchTemplatesFunc: mockPatchTemplatesFunc,
				SupportsPatch:      false, // No patch for annotations removal test
			}
			result := removePodAnnotations(funcs, deployment, tt.config, true)
			// Should return Updated since it sets the SHA to empty data hash
			assert.Equal(t, constants.Updated, result.Result)
		})
	}
}

View File

@@ -0,0 +1,288 @@
package handler
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Helper function to create a test ConfigMap
// createTestConfigMap builds a ConfigMap with the given identity and data for
// use as handler input in tests.
func createTestConfigMap(name, namespace string, data map[string]string) *v1.ConfigMap {
	cm := v1.ConfigMap{Data: data}
	cm.Name = name
	cm.Namespace = namespace
	return &cm
}
// Helper function to create a test Secret
// createTestSecret builds a Secret with the given identity and data for use
// as handler input in tests.
func createTestSecret(name, namespace string, data map[string][]byte) *v1.Secret {
	secret := v1.Secret{Data: data}
	secret.Name = name
	secret.Namespace = namespace
	return &secret
}
// Helper function to create test metrics collectors
// createTestCollectors returns a fresh metrics collector set so each test
// records metrics in isolation.
func createTestCollectors() metrics.Collectors {
	return metrics.NewCollectors()
}
// ============================================================
// ResourceCreatedHandler Tests
// ============================================================
func TestResourceCreatedHandler_GetConfig_ConfigMap(t *testing.T) {
cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "value"})
handler := ResourceCreatedHandler{
Resource: cm,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-cm", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.Empty(t, oldSHA) // oldSHA is always empty for create handler
}
func TestResourceCreatedHandler_GetConfig_Secret(t *testing.T) {
secret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("value")})
handler := ResourceCreatedHandler{
Resource: secret,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-secret", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.SecretEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.Empty(t, oldSHA)
}
// TestResourceCreatedHandler_GetConfig_InvalidResource verifies that an
// unsupported resource type produces a zero-value config.
func TestResourceCreatedHandler_GetConfig_InvalidResource(t *testing.T) {
	h := ResourceCreatedHandler{
		Resource:   "invalid",
		Collectors: createTestCollectors(),
	}

	cfg, _ := h.GetConfig()

	assert.Empty(t, cfg.ResourceName)
}
// TestResourceCreatedHandler_Handle_NilResource confirms Handle tolerates a
// nil resource: the problem is logged internally, not returned as an error.
func TestResourceCreatedHandler_Handle_NilResource(t *testing.T) {
	h := ResourceCreatedHandler{
		Resource:   nil,
		Collectors: createTestCollectors(),
	}

	assert.NoError(t, h.Handle())
}
// ============================================================
// ResourceDeleteHandler Tests
// ============================================================
func TestResourceDeleteHandler_GetConfig_ConfigMap(t *testing.T) {
cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "value"})
handler := ResourceDeleteHandler{
Resource: cm,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-cm", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.Empty(t, oldSHA)
}
func TestResourceDeleteHandler_GetConfig_Secret(t *testing.T) {
secret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("value")})
handler := ResourceDeleteHandler{
Resource: secret,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-secret", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.SecretEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.Empty(t, oldSHA)
}
// TestResourceDeleteHandler_GetConfig_InvalidResource verifies that an
// unsupported resource type produces a zero-value config.
func TestResourceDeleteHandler_GetConfig_InvalidResource(t *testing.T) {
	h := ResourceDeleteHandler{
		Resource:   "invalid",
		Collectors: createTestCollectors(),
	}

	cfg, _ := h.GetConfig()

	assert.Empty(t, cfg.ResourceName)
}
// TestResourceDeleteHandler_Handle_NilResource confirms Handle tolerates a
// nil resource without returning an error.
func TestResourceDeleteHandler_Handle_NilResource(t *testing.T) {
	h := ResourceDeleteHandler{
		Resource:   nil,
		Collectors: createTestCollectors(),
	}

	assert.NoError(t, h.Handle())
}
// ============================================================
// ResourceUpdatedHandler Tests
// ============================================================
func TestResourceUpdatedHandler_GetConfig_ConfigMap(t *testing.T) {
oldCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "old-value"})
newCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "new-value"})
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-cm", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.ConfigmapEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.NotEmpty(t, oldSHA)
// SHAs should be different since data changed
assert.NotEqual(t, config.SHAValue, oldSHA)
}
func TestResourceUpdatedHandler_GetConfig_ConfigMap_SameData(t *testing.T) {
oldCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"})
newCM := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"})
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-cm", config.ResourceName)
// SHAs should be the same since data didn't change
assert.Equal(t, config.SHAValue, oldSHA)
}
func TestResourceUpdatedHandler_GetConfig_Secret(t *testing.T) {
oldSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("old-value")})
newSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("new-value")})
handler := ResourceUpdatedHandler{
Resource: newSecret,
OldResource: oldSecret,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-secret", config.ResourceName)
assert.Equal(t, "default", config.Namespace)
assert.Equal(t, constants.SecretEnvVarPostfix, config.Type)
assert.NotEmpty(t, config.SHAValue)
assert.NotEmpty(t, oldSHA)
assert.NotEqual(t, config.SHAValue, oldSHA)
}
func TestResourceUpdatedHandler_GetConfig_Secret_SameData(t *testing.T) {
oldSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("same-value")})
newSecret := createTestSecret("test-secret", "default", map[string][]byte{"key": []byte("same-value")})
handler := ResourceUpdatedHandler{
Resource: newSecret,
OldResource: oldSecret,
Collectors: createTestCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.Equal(t, "test-secret", config.ResourceName)
// SHAs should be the same since data didn't change
assert.Equal(t, config.SHAValue, oldSHA)
}
// TestResourceUpdatedHandler_GetConfig_InvalidResource verifies that an
// unsupported resource type produces a zero-value config.
func TestResourceUpdatedHandler_GetConfig_InvalidResource(t *testing.T) {
	h := ResourceUpdatedHandler{
		Resource:    "invalid",
		OldResource: "invalid",
		Collectors:  createTestCollectors(),
	}

	cfg, _ := h.GetConfig()

	assert.Empty(t, cfg.ResourceName)
}
// TestResourceUpdatedHandler_Handle_NilResource confirms Handle tolerates nil
// for both the new and old resource without returning an error.
func TestResourceUpdatedHandler_Handle_NilResource(t *testing.T) {
	h := ResourceUpdatedHandler{
		Resource:    nil,
		OldResource: nil,
		Collectors:  createTestCollectors(),
	}

	assert.NoError(t, h.Handle())
}
func TestResourceUpdatedHandler_Handle_NilOldResource(t *testing.T) {
cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "value"})
handler := ResourceUpdatedHandler{
Resource: cm,
OldResource: nil,
Collectors: createTestCollectors(),
}
err := handler.Handle()
// Should not return error (just logs error)
assert.NoError(t, err)
}
// TestResourceUpdatedHandler_Handle_NoChange confirms Handle is a silent no-op
// when old and new resources hash to the same SHA.
func TestResourceUpdatedHandler_Handle_NoChange(t *testing.T) {
	cm := createTestConfigMap("test-cm", "default", map[string]string{"key": "same-value"})
	h := ResourceUpdatedHandler{
		Resource:    cm,
		OldResource: cm, // same resource => same SHA => nothing to do
		Collectors:  createTestCollectors(),
	}

	assert.NoError(t, h.Handle())
}

View File

@@ -0,0 +1,530 @@
package handler
import (
"testing"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestResourceUpdatedHandler_GetConfig is a table-driven test of GetConfig.
// Each case feeds an old/new resource pair into the handler and checks the
// derived config's name, namespace, and env-var postfix type, plus whether
// the SHA value differs between the two resources. The table demonstrates
// that only the data payload (Data / BinaryData for ConfigMaps, Data for
// Secrets) influences the SHA — label and annotation changes do not.
func TestResourceUpdatedHandler_GetConfig(t *testing.T) {
	tests := []struct {
		name              string
		oldResource       any
		newResource       any
		expectedName      string
		expectedNS        string
		expectedType      string
		expectSHANotEmpty bool // false only for unsupported resource types
		expectSHAChanged  bool // true when the data payload differs between old and new
	}{
		{
			name: "ConfigMap data changed",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key": "old-value"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key": "new-value"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  true,
		},
		{
			name: "ConfigMap data unchanged",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key": "same-value"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key": "same-value"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  false,
		},
		{
			name: "ConfigMap key added",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key1": "value1"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key1": "value1", "key2": "value2"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  true,
		},
		{
			name: "ConfigMap key removed",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key1": "value1", "key2": "value2"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "my-cm", Namespace: "default"},
				Data:       map[string]string{"key1": "value1"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  true,
		},
		{
			name: "ConfigMap only labels changed - SHA unchanged",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-cm",
					Namespace: "default",
					Labels:    map[string]string{"version": "v1"},
				},
				Data: map[string]string{"key": "value"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-cm",
					Namespace: "default",
					Labels:    map[string]string{"version": "v2"},
				},
				Data: map[string]string{"key": "value"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  false, // Only data affects SHA, not labels
		},
		{
			name: "ConfigMap only annotations changed - SHA unchanged",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "my-cm",
					Namespace:   "default",
					Annotations: map[string]string{"note": "old"},
				},
				Data: map[string]string{"key": "value"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "my-cm",
					Namespace:   "default",
					Annotations: map[string]string{"note": "new"},
				},
				Data: map[string]string{"key": "value"},
			},
			expectedName:      "my-cm",
			expectedNS:        "default",
			expectedType:      constants.ConfigmapEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  false, // Only data affects SHA, not annotations
		},
		{
			name: "Secret data changed",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"password": []byte("old-pass")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"password": []byte("new-pass")},
			},
			expectedName:      "my-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  true,
		},
		{
			name: "Secret data unchanged",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"password": []byte("same-pass")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"password": []byte("same-pass")},
			},
			expectedName:      "my-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  false,
		},
		{
			name: "Secret key added",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"key1": []byte("value1")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "my-secret", Namespace: "default"},
				Data:       map[string][]byte{"key1": []byte("value1"), "key2": []byte("value2")},
			},
			expectedName:      "my-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  true,
		},
		{
			name: "Secret only labels changed - SHA unchanged",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-secret",
					Namespace: "default",
					Labels:    map[string]string{"env": "dev"},
				},
				Data: map[string][]byte{"key": []byte("value")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-secret",
					Namespace: "default",
					Labels:    map[string]string{"env": "prod"},
				},
				Data: map[string][]byte{"key": []byte("value")},
			},
			expectedName:      "my-secret",
			expectedNS:        "default",
			expectedType:      constants.SecretEnvVarPostfix,
			expectSHANotEmpty: true,
			expectSHAChanged:  false,
		},
		{
			name:              "Invalid resource type",
			oldResource:       "invalid",
			newResource:       "invalid",
			expectedName:      "",
			expectedNS:        "",
			expectedType:      "",
			expectSHANotEmpty: false,
			expectSHAChanged:  false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := ResourceUpdatedHandler{
				Resource:    tt.newResource,
				OldResource: tt.oldResource,
				Collectors:  metrics.NewCollectors(),
			}
			// GetConfig returns the config derived from the new resource and
			// the SHA computed over the old one.
			config, oldSHA := handler.GetConfig()
			assert.Equal(t, tt.expectedName, config.ResourceName)
			assert.Equal(t, tt.expectedNS, config.Namespace)
			assert.Equal(t, tt.expectedType, config.Type)
			if tt.expectSHANotEmpty {
				assert.NotEmpty(t, config.SHAValue, "new SHA should not be empty")
				assert.NotEmpty(t, oldSHA, "old SHA should not be empty")
			}
			if tt.expectSHAChanged {
				assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should have changed")
			} else if tt.expectSHANotEmpty {
				assert.Equal(t, config.SHAValue, oldSHA, "SHA should not have changed")
			}
		})
	}
}
// TestResourceUpdatedHandler_Handle is a table-driven test of Handle. It
// covers nil resources (either side), unchanged data (no-op), and changed
// data (update path). In every case Handle is expected to return nil:
// nil-resource problems are only logged, and in this isolated test there are
// no workloads for the changed-data path to update.
func TestResourceUpdatedHandler_Handle(t *testing.T) {
	tests := []struct {
		name        string
		oldResource any
		newResource any
		expectError bool
	}{
		{
			name:        "Both resources nil",
			oldResource: nil,
			newResource: nil,
			expectError: false, // logs error but returns nil
		},
		{
			name:        "Old resource nil",
			oldResource: nil,
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "value"},
			},
			expectError: false,
		},
		{
			name: "New resource nil",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "value"},
			},
			newResource: nil,
			expectError: false,
		},
		{
			name: "ConfigMap unchanged - no action",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "same"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "same"},
			},
			expectError: false,
		},
		{
			name: "ConfigMap changed - triggers update",
			oldResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "old"},
			},
			newResource: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
				Data:       map[string]string{"key": "new"},
			},
			expectError: false, // No error, but no workloads to update in test
		},
		{
			name: "Secret unchanged - no action",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"},
				Data:       map[string][]byte{"key": []byte("same")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"},
				Data:       map[string][]byte{"key": []byte("same")},
			},
			expectError: false,
		},
		{
			name: "Secret changed - triggers update",
			oldResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"},
				Data:       map[string][]byte{"key": []byte("old")},
			},
			newResource: &v1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: "secret", Namespace: "default"},
				Data:       map[string][]byte{"key": []byte("new")},
			},
			expectError: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := ResourceUpdatedHandler{
				Resource:    tt.newResource,
				OldResource: tt.oldResource,
				Collectors:  metrics.NewCollectors(),
			}
			err := handler.Handle()
			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
// TestResourceUpdatedHandler_GetConfig_Annotations verifies that GetConfig
// reports the annotations of the new resource and drops annotations that
// only existed on the old one.
func TestResourceUpdatedHandler_GetConfig_Annotations(t *testing.T) {
	previous := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "cm",
			Namespace:   "default",
			Annotations: map[string]string{"old-annotation": "old-value"},
		},
		Data: map[string]string{"key": "value"},
	}
	current := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "cm",
			Namespace:   "default",
			Annotations: map[string]string{"new-annotation": "new-value"},
		},
		Data: map[string]string{"key": "value"},
	}

	h := ResourceUpdatedHandler{
		Resource:    current,
		OldResource: previous,
		Collectors:  metrics.NewCollectors(),
	}
	config, _ := h.GetConfig()

	// The new resource's annotations win; the old ones must be gone.
	assert.Equal(t, "new-value", config.ResourceAnnotations["new-annotation"])
	assert.NotContains(t, config.ResourceAnnotations, "old-annotation")
}
// TestResourceUpdatedHandler_GetConfig_Labels verifies that GetConfig exposes
// the labels carried by the new resource.
func TestResourceUpdatedHandler_GetConfig_Labels(t *testing.T) {
	previous := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "secret",
			Namespace: "default",
			Labels:    map[string]string{"version": "v1"},
		},
		Data: map[string][]byte{"key": []byte("value")},
	}
	current := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "secret",
			Namespace: "default",
			Labels:    map[string]string{"version": "v2"},
		},
		Data: map[string][]byte{"key": []byte("value")},
	}

	h := ResourceUpdatedHandler{
		Resource:    current,
		OldResource: previous,
		Collectors:  metrics.NewCollectors(),
	}
	config, _ := h.GetConfig()

	assert.Equal(t, "v2", config.Labels["version"], "labels should come from the new resource")
}
func TestResourceUpdatedHandler_EmptyToNonEmpty(t *testing.T) {
// Test transition from empty data to non-empty data
oldCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{},
}
newCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"key": "value"},
}
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: metrics.NewCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is added")
}
func TestResourceUpdatedHandler_NonEmptyToEmpty(t *testing.T) {
// Test transition from non-empty data to empty data
oldCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"key": "value"},
}
newCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{},
}
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: metrics.NewCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when data is removed")
}
func TestResourceUpdatedHandler_BinaryDataChange(t *testing.T) {
// Test ConfigMap binary data change
oldCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
BinaryData: map[string][]byte{"binary": []byte("old-binary")},
}
newCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
BinaryData: map[string][]byte{"binary": []byte("new-binary")},
}
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: metrics.NewCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes")
}
func TestResourceUpdatedHandler_MixedDataAndBinaryData(t *testing.T) {
// Test ConfigMap with both Data and BinaryData
oldCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"text": "value"},
BinaryData: map[string][]byte{"binary": []byte("binary-value")},
}
newCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
Data: map[string]string{"text": "value"},
BinaryData: map[string][]byte{"binary": []byte("new-binary-value")},
}
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: metrics.NewCollectors(),
}
config, oldSHA := handler.GetConfig()
assert.NotEqual(t, config.SHAValue, oldSHA, "SHA should change when binary data changes")
}
func TestResourceUpdatedHandler_DifferentNamespaces(t *testing.T) {
// Edge case: what if namespaces are different (shouldn't happen in practice)
oldCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns1"},
Data: map[string]string{"key": "value"},
}
newCM := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "ns2"},
Data: map[string]string{"key": "value"},
}
handler := ResourceUpdatedHandler{
Resource: newCM,
OldResource: oldCM,
Collectors: metrics.NewCollectors(),
}
config, _ := handler.GetConfig()
// Should use new resource's namespace
assert.Equal(t, "ns2", config.Namespace)
}

File diff suppressed because it is too large Load Diff

View File

@@ -2,8 +2,6 @@ package testutil
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"sort"
"strconv"
@@ -12,13 +10,10 @@ import (
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
openshiftv1 "github.com/openshift/api/apps/v1"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"github.com/stakater/Reloader/internal/pkg/callbacks"
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/crypto"
"github.com/stakater/Reloader/internal/pkg/metrics"
"github.com/stakater/Reloader/internal/pkg/options"
"github.com/stakater/Reloader/internal/pkg/util"
"github.com/stakater/Reloader/pkg/common"
@@ -36,8 +31,6 @@ var (
letters = []rune("abcdefghijklmnopqrstuvwxyz")
// ConfigmapResourceType is a resource type which controller watches for changes
ConfigmapResourceType = "configMaps"
// SecretResourceType is a resource type which controller watches for changes
SecretResourceType = "secrets"
)
var (
@@ -45,11 +38,6 @@ var (
Pod = "test-reloader-" + RandSeq(5)
Namespace = "test-reloader-" + RandSeq(5)
ConfigmapNamePrefix = "testconfigmap-reloader"
SecretNamePrefix = "testsecret-reloader"
Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
Collectors = metrics.NewCollectors()
SleepDuration = 3 * time.Second
)
@@ -105,25 +93,6 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configm
return annotations
}
func getEnvVarSources(name string) []v1.EnvFromSource {
return []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
}
}
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
@@ -244,23 +213,6 @@ func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec {
}
}
func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
EnvFrom: getEnvVarSources(name),
},
},
},
}
}
func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -285,65 +237,6 @@ func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
}
}
func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Image: "busybox",
Name: "busyBox",
VolumeMounts: getVolumeMounts(),
},
},
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
Volumes: getVolumes(name),
},
}
}
func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Image: "busybox",
Name: "busyBox",
EnvFrom: getEnvVarSources(name),
},
},
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
},
}
}
// GetDeployment provides deployment for testing
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
@@ -362,58 +255,6 @@ func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
}
}
// GetDeploymentConfig provides deployment for testing
func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithVolume,
},
}
}
// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainer(deploymentName),
},
}
}
// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName),
},
}
}
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
@@ -431,117 +272,6 @@ func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.D
}
}
func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithEnvVars,
},
}
}
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
}
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
replicaset := int32(1)
deployment := &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
if !both {
deployment.Annotations = nil
}
deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
return deployment
}
func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
replicaset := int32(1)
var objectMeta metav1.ObjectMeta
switch resourceType {
case SecretResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
case ConfigmapResourceType:
objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
}
return &appsv1.Deployment{
ObjectMeta: objectMeta,
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithVolumes(deploymentName),
},
}
}
func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
replicaset := int32(1)
annotation := map[string]string{}
switch resourceType {
case SecretResourceType:
annotation[options.SecretExcludeReloaderAnnotation] = deploymentName
case ConfigmapResourceType:
annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName
}
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: namespace,
Labels: map[string]string{"firstLabel": "temp"},
Annotations: annotation,
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithVolumes(deploymentName),
},
}
}
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
@@ -629,18 +359,6 @@ func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLa
}
}
// GetSecret provides secret for testing
func GetSecret(namespace string, secretName string, data string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: map[string]string{"firstLabel": "temp"},
},
Data: map[string][]byte{"test.url": []byte(data)},
}
}
func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
return &batchv1.CronJob{
ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
@@ -699,18 +417,6 @@ func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
}
}
// GetSecretWithUpdatedLabel provides secret for testing
func GetSecretWithUpdatedLabel(namespace string, secretName string, label string, data string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: map[string]string{"firstLabel": label},
},
Data: map[string][]byte{"test.url": []byte(data)},
}
}
// GetResourceSHAFromEnvVar returns the SHA value of given environment variable
func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
for i := range containers {
@@ -724,38 +430,10 @@ func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
return ""
}
// GetResourceSHAFromAnnotation returns the SHA value of given environment variable
func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
lastReloadedResourceName := fmt.Sprintf("%s/%s",
constants.ReloaderAnnotationPrefix,
constants.LastReloadedFromAnnotation,
)
annotationJson, ok := podAnnotations[lastReloadedResourceName]
if !ok {
return ""
}
var last common.ReloadSource
bytes := []byte(annotationJson)
err := json.Unmarshal(bytes, &last)
if err != nil {
return ""
}
return last.Hash
}
// ConvertResourceToSHA generates SHA from secret or configmap data
// ConvertResourceToSHA generates SHA from configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
switch resourceType {
case SecretResourceType:
secret := GetSecret(namespace, resourceName, data)
for k, v := range secret.Data {
values = append(values, k+"="+string(v[:]))
}
case ConfigmapResourceType:
if resourceType == ConfigmapResourceType {
configmap := GetConfigmap(namespace, resourceName, data)
for k, v := range configmap.Data {
values = append(values, k+"="+v)
@@ -774,15 +452,6 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
return configmapClient, err
}
// CreateSecret creates a secret in given namespace and returns the SecretInterface
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return secretClient, err
}
// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
@@ -798,108 +467,6 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
return deployment, err
}
// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeployment(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
for annotationKey, annotationValue := range additionalAnnotations {
deploymentObj.Annotations[annotationKey] = annotationValue
}
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
logrus.Infof("Creating DeploymentConfig")
deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace)
var deploymentConfigObj *openshiftv1.DeploymentConfig
if volumeMount {
deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName)
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
time.Sleep(5 * time.Second)
return deploymentConfig, err
}
// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given
// namespace with given annotations.
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithTypedAutoAnnotation creates a deployment in given namespace and returns the Deployment with typed auto annotation
func CreateDeploymentWithTypedAutoAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithTypedAutoAnnotation(namespace, deploymentName, resourceType)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}
// CreateDeploymentWithExcludeAnnotation creates a deployment in given namespace and returns the Deployment with the exclude annotation
func CreateDeploymentWithExcludeAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithExcludeAnnotation(namespace, deploymentName, resourceType)
	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	// Wait briefly, like every other Create* helper in this file, so the object
	// is observable before tests continue (this pause was missing here).
	time.Sleep(3 * time.Second)
	return deployment, err
}
// CreateDaemonSet creates a deployment in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) {
logrus.Infof("Creating DaemonSet")
@@ -968,14 +535,6 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
return deploymentError
}
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
	logrus.Infof("Deleting DeploymentConfig")
	err := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
	// Short pause so the deletion settles before the next test step.
	time.Sleep(3 * time.Second)
	return err
}
// DeleteDaemonSet creates a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
@@ -1022,20 +581,6 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
return updateErr
}
// UpdateSecret updates a secret in given namespace and returns the error if any
func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error {
	logrus.Infof("Updating secret %q.\n", secretName)
	// A non-empty label means the secret's label should be updated as well as
	// its data; otherwise only the data payload changes.
	var secret *v1.Secret
	switch {
	case label != "":
		secret = GetSecretWithUpdatedLabel(namespace, secretName, label, data)
	default:
		secret = GetSecret(namespace, secretName, data)
	}
	_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
	// Short pause so the update settles before the next test step.
	time.Sleep(3 * time.Second)
	return updateErr
}
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
@@ -1044,14 +589,6 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
return err
}
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
	deleteErr := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
	// Short pause so the deletion settles before the next test step.
	time.Sleep(3 * time.Second)
	return deleteErr
}
// RandSeq generates a random sequence
func RandSeq(n int) string {
b := make([]rune, n)
@@ -1107,100 +644,6 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envV
return false
}
// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened and
// the STAKATER_<name>_<envVarPostfix> environment variable has been removed.
// It scans every item of the workload type provided by upgradeFuncs in the
// config's namespace, selects the ones whose annotations make them a reload
// target for config.ResourceName, and returns true if a matching item no
// longer carries the env var. Returns false when no matching item qualifies,
// or when an item's metadata cannot be accessed.
func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
		accessor, err := meta.Accessor(i)
		if err != nil {
			return false
		}
		annotations := accessor.GetAnnotations()
		// match statefulsets with the correct annotation
		annotationValue := annotations[config.Annotation]
		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
		typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
		// ParseBool fails for absent/invalid annotations; the err values are
		// consulted below so only a validly-set boolean counts as enabled.
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue)
		matches := false
		if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled {
			// Auto reload (generic or typed) is explicitly enabled on the workload.
			matches = true
		} else if annotationValue != "" {
			// Explicit reload annotation: comma-separated list of resource names.
			values := strings.Split(annotationValue, ",")
			for _, value := range values {
				value = strings.Trim(value, " ")
				if value == config.ResourceName {
					matches = true
					break
				}
			}
		} else if searchAnnotationValue == "true" {
			// Search/match pairing: the workload searches and the resource must
			// carry the match annotation.
			if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
				matches = true
			}
		}
		if matches {
			envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
			value := GetResourceSHAFromEnvVar(containers, envName)
			// An empty SHA value means the env var is gone, i.e. removal succeeded.
			if value == "" {
				return true
			}
		}
	}
	return false
}
// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened
// by checking pod-template annotations. It scans every item of the workload
// type provided by upgradeFuncs in the config's namespace, selects the ones
// whose annotations make them a reload target for config.ResourceName, and
// returns true if a matching item's pod annotations carry the expected SHA
// (config.SHAValue). Returns false when no matching item has the updated SHA,
// or when an item's metadata cannot be accessed.
func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
		accessor, err := meta.Accessor(i)
		if err != nil {
			return false
		}
		annotations := accessor.GetAnnotations()
		// match statefulsets with the correct annotation
		annotationValue := annotations[config.Annotation]
		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
		typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
		// Parse errors are deliberately ignored here: an absent/invalid
		// annotation simply leaves the flag false.
		reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
		typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
		matches := false
		if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
			// Auto reload enabled on the workload, or global AutoReloadAll is on
			// and the workload has not opted out via either auto annotation.
			matches = true
		} else if annotationValue != "" {
			// Explicit reload annotation: comma-separated list of resource names.
			values := strings.Split(annotationValue, ",")
			for _, value := range values {
				value = strings.Trim(value, " ")
				if value == config.ResourceName {
					matches = true
					break
				}
			}
		} else if searchAnnotationValue == "true" {
			// Search/match pairing: the workload searches and the resource must
			// carry the match annotation.
			if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
				matches = true
			}
		}
		if matches {
			updated := GetResourceSHAFromAnnotation(podAnnotations)
			if updated == config.SHAValue {
				return true
			}
		}
	}
	return false
}
// GetSHAfromEmptyData returns the SHA computed over an empty string, used to
// compare against workload SHA values when a resource has no data.
func GetSHAfromEmptyData() string {
	const emptyData = ""
	return crypto.GenerateSHA(emptyData)
}

View File

@@ -0,0 +1,93 @@
#!/bin/bash
# Cleanup script for e2e test cluster
# Run this after e2e tests complete: ./scripts/e2e-cluster-cleanup.sh
# This removes Argo Rollouts, test namespaces, and cluster-scoped resources.
set -e
# Version should match what e2e-cluster-setup.sh installed so the delete
# manifest lines up with the installed resources.
ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}"
ARGO_ROLLOUTS_NAMESPACE="argo-rollouts"
echo "=== E2E Cluster Cleanup ==="
# Check if kubectl is available
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed or not in PATH"
    exit 1
fi
# Check cluster connectivity
echo "Checking cluster connectivity..."
if ! kubectl cluster-info &> /dev/null; then
    echo "Error: Cannot connect to Kubernetes cluster"
    exit 1
fi
# ============================================================
# Cleanup Reloader Test Resources
# ============================================================
echo ""
echo "=== Cleaning up Reloader test resources ==="
# Delete test namespaces (created by test suites)
# --wait=false: don't block on namespace finalizers; deletion proceeds async.
echo "Deleting test namespaces..."
for ns in $(kubectl get namespaces -o name | grep -E "reloader-" | cut -d/ -f2); do
    echo "  Deleting namespace: ${ns}"
    kubectl delete namespace "${ns}" --ignore-not-found --wait=false
done
# Delete Reloader cluster-scoped resources
echo "Deleting Reloader cluster-scoped resources..."
for cr in $(kubectl get clusterrole -o name 2>/dev/null | grep -E "reloader-" | cut -d/ -f2); do
    echo "  Deleting ClusterRole: ${cr}"
    kubectl delete clusterrole "${cr}" --ignore-not-found
done
for crb in $(kubectl get clusterrolebinding -o name 2>/dev/null | grep -E "reloader-" | cut -d/ -f2); do
    echo "  Deleting ClusterRoleBinding: ${crb}"
    kubectl delete clusterrolebinding "${crb}" --ignore-not-found
done
# ============================================================
# Cleanup Argo Rollouts
# ============================================================
echo ""
echo "=== Uninstalling Argo Rollouts ==="
# First, delete the deployment to stop the controller
# (so it cannot interfere while its custom resources are being removed)
echo "Stopping Argo Rollouts controller..."
kubectl delete deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --ignore-not-found --timeout=30s 2>/dev/null || true
# Delete all Rollouts and other CRs in all namespaces to avoid finalizer issues
# (CRs must go before their CRDs, otherwise deletion can hang on finalizers)
echo "Deleting Argo Rollouts custom resources..."
ARGO_RESOURCES="rollouts analysisruns analysistemplates experiments"
for res in ${ARGO_RESOURCES}; do
    kubectl delete "${res}.argoproj.io" --all --all-namespaces --ignore-not-found --timeout=30s 2>/dev/null || true
done
# Delete using the install manifest
echo "Deleting Argo Rollouts installation..."
ARGO_URL="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml"
kubectl delete -f ${ARGO_URL} --ignore-not-found --timeout=60s 2>/dev/null || true
# Give resources time to be cleaned up before deleting CRDs
sleep 2
# Explicitly delete CRDs (cluster-scoped)
echo "Deleting Argo Rollouts CRDs..."
ARGO_CRDS="rollouts.argoproj.io analysisruns.argoproj.io analysistemplates.argoproj.io clusteranalysistemplates.argoproj.io experiments.argoproj.io"
for crd in ${ARGO_CRDS}; do
    kubectl delete crd "${crd}" --ignore-not-found --timeout=30s 2>/dev/null || true
done
# Delete namespace
echo "Deleting Argo Rollouts namespace..."
kubectl delete namespace ${ARGO_ROLLOUTS_NAMESPACE} --ignore-not-found --timeout=30s 2>/dev/null || true
# Delete cluster-scoped RBAC
# (cluster-scoped, so not removed with the namespace above)
echo "Deleting Argo Rollouts cluster RBAC..."
kubectl delete clusterrole argo-rollouts argo-rollouts-aggregate-to-admin argo-rollouts-aggregate-to-edit argo-rollouts-aggregate-to-view --ignore-not-found 2>/dev/null || true
kubectl delete clusterrolebinding argo-rollouts --ignore-not-found 2>/dev/null || true
echo ""
echo "=== E2E Cluster Cleanup Complete ==="

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Setup script for e2e test cluster
# Run this before running e2e tests: ./scripts/e2e-cluster-setup.sh
# This installs Argo Rollouts and any other prerequisites needed for e2e tests.
set -e
ARGO_ROLLOUTS_VERSION="${ARGO_ROLLOUTS_VERSION:-v1.7.2}"
ARGO_ROLLOUTS_NAMESPACE="argo-rollouts"
echo "=== E2E Cluster Setup ==="
# Check if kubectl is available
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed or not in PATH"
    exit 1
fi
# Check cluster connectivity
echo "Checking cluster connectivity..."
if ! kubectl cluster-info &> /dev/null; then
    echo "Error: Cannot connect to Kubernetes cluster"
    exit 1
fi
echo "Cluster connectivity verified"
# Install Argo Rollouts
echo ""
echo "=== Installing Argo Rollouts ${ARGO_ROLLOUTS_VERSION} ==="
# Check if Argo Rollouts is already installed
# (the apply below is idempotent either way; this only affects the log output)
if kubectl get crd rollouts.argoproj.io &> /dev/null; then
    echo "Argo Rollouts CRD already exists, checking if controller is running..."
    if kubectl get deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} &> /dev/null; then
        echo "Argo Rollouts is already installed and running"
    else
        echo "Argo Rollouts CRD exists but controller not running, reinstalling..."
    fi
else
    echo "Installing Argo Rollouts..."
fi
# Create namespace (ignore if exists)
kubectl create namespace ${ARGO_ROLLOUTS_NAMESPACE} 2>/dev/null || true
# Install Argo Rollouts
ARGO_URL="https://github.com/argoproj/argo-rollouts/releases/download/${ARGO_ROLLOUTS_VERSION}/install.yaml"
echo "Applying manifest from: ${ARGO_URL}"
kubectl apply -n ${ARGO_ROLLOUTS_NAMESPACE} -f ${ARGO_URL}
# Wait for deployment to exist
echo "Waiting for deployment to be created..."
sleep 2
# Patch deployment to remove resource requirements (for Kind cluster compatibility)
# This avoids "Insufficient ephemeral-storage" errors in resource-constrained environments
echo "Patching deployment for Kind compatibility..."
PATCH_JSON='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]'
if ! kubectl patch deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --type=json -p "${PATCH_JSON}" 2>/dev/null; then
    # The JSON-patch "remove" op fails if the resources field is absent;
    # fall back to a strategic merge that nulls limits/requests instead.
    echo "JSON patch failed, trying strategic merge..."
    PATCH_JSON='{"spec":{"template":{"spec":{"containers":[{"name":"argo-rollouts","resources":{"limits":null,"requests":null}}]}}}}'
    kubectl patch deployment argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --type=strategic -p "${PATCH_JSON}" || echo "Warning: Failed to patch resources"
fi
# Wait for controller to be ready
echo "Waiting for Argo Rollouts controller to be ready..."
kubectl wait --for=condition=available deployment/argo-rollouts -n ${ARGO_ROLLOUTS_NAMESPACE} --timeout=180s
# Wait for CRD to be established
echo "Waiting for Argo Rollouts CRD to be established..."
kubectl wait --for=condition=established crd/rollouts.argoproj.io --timeout=60s
echo ""
echo "=== E2E Cluster Setup Complete ==="
echo "Argo Rollouts ${ARGO_ROLLOUTS_VERSION} is installed and ready"
echo ""
echo "You can now run e2e tests:"
echo "  make e2e-test"
echo "  # or"
echo "  SKIP_BUILD=true RELOADER_IMAGE=ghcr.io/stakater/reloader:test go test -v ./test/e2e/..."

457
test/e2e/README.md Normal file
View File

@@ -0,0 +1,457 @@
# Reloader E2E Tests
These tests verify that Reloader actually works in a real Kubernetes cluster. They spin up a Kind cluster, build and deploy Reloader, then create workloads and change their ConfigMaps/Secrets to make sure everything reloads correctly.
## Running the Tests
```bash
# Run everything (creates Kind cluster, builds image, runs tests)
make e2e
# Test a specific image without building
SKIP_BUILD=true RELOADER_IMAGE=stakater/reloader:v1.0.0 make e2e
# Run just one test suite
go test -v -timeout 30m ./test/e2e/core/...
go test -v -timeout 30m ./test/e2e/annotations/...
go test -v -timeout 30m ./test/e2e/flags/...
# Skip Argo/OpenShift tests (if you don't have them installed)
go test -v ./test/e2e/core/... --ginkgo.label-filter="!argo && !openshift"
```
## What You Need
- Go 1.21+
- Docker
- [Kind](https://kind.sigs.k8s.io/)
- kubectl
- Helm 3
- Argo Rollouts (optional, for Argo tests)
- OpenShift (optional, for DeploymentConfig tests)
---
## What Gets Tested
### Deployments
Deployments are the most thoroughly tested workload. Here's everything we verify:
**Basic Reload Behavior**
- Reloads when a referenced ConfigMap's data changes
- Reloads when a referenced Secret's data changes
- Reloads when using `auto=true` annotation (auto-detects all mounted ConfigMaps/Secrets)
- Does NOT reload when only ConfigMap/Secret labels change (data must change)
- Does NOT reload when `auto=false` is set
**Different Ways to Reference ConfigMaps/Secrets**
- `envFrom` - inject all keys as environment variables
- `valueFrom.configMapKeyRef` - single key as env var
- `valueFrom.secretKeyRef` - single key as env var
- Volume mounts - mount ConfigMap/Secret as files
- Projected volumes - multiple sources combined into one mount
- Init containers with envFrom
- Init containers with volume mounts
**Annotation Variations**
- `configmap.reloader.stakater.com/reload: my-config` - explicit ConfigMap
- `secret.reloader.stakater.com/reload: my-secret` - explicit Secret
- `reloader.stakater.com/auto: "true"` - auto-detect everything
- `configmap.reloader.stakater.com/auto: "true"` - auto-detect only ConfigMaps
- `secret.reloader.stakater.com/auto: "true"` - auto-detect only Secrets
- Multiple ConfigMaps/Secrets in one annotation (comma-separated)
- Annotations on pod template vs deployment metadata (both work)
**Search & Match**
- Deployments with `search` annotation find ConfigMaps with `match` annotation
- Only reloads if both sides have the right annotations
**Exclude & Ignore**
- Exclude specific ConfigMaps/Secrets from auto-reload
- Ignore annotation on ConfigMap/Secret prevents any reload
**Pause Period**
- Deployment gets paused after reload when pause-period annotation is set
**Regex Patterns**
- Pattern matching for ConfigMap/Secret names (e.g., `app-config-.*`)
**Multi-Container**
- Works when multiple containers share the same ConfigMap
- Works when different containers use different ConfigMaps
**EnvVars Strategy**
- Adds `STAKATER_` environment variables instead of pod annotations
- Verifies the env var appears after ConfigMap/Secret change
### DaemonSets
DaemonSets get the same treatment as Deployments:
- Reloads when ConfigMap data changes
- Reloads when Secret data changes
- Works with `auto=true` annotation
- Does NOT reload on label-only changes
- Supports all reference methods (envFrom, valueFrom, volumes, projected, init containers)
- EnvVars strategy works
### StatefulSets
StatefulSets are tested identically to Deployments and DaemonSets:
- Reloads when ConfigMap data changes
- Reloads when Secret data changes
- Works with `auto=true` annotation
- Does NOT reload on label-only changes
- Supports all reference methods
- EnvVars strategy works
### CronJobs
CronJobs are a bit special - when a CronJob's ConfigMap changes, Reloader updates the CronJob spec so the *next* Job it creates will have the new config.
**What's Tested**
- CronJob spec updates when referenced ConfigMap changes
- CronJob spec updates when referenced Secret changes
- Works with `auto=true` annotation
- Works with explicit reload annotations
- Does NOT update on label-only changes
**Note:** CronJobs don't support the EnvVars strategy since they don't have running pods to inject env vars into.
### Jobs
Jobs require special handling - since you can't modify a running Job, Reloader deletes and recreates it with the new config.
**What's Tested**
- Job gets recreated (new UID) when ConfigMap changes
- Job gets recreated when Secret changes
- Works with `auto=true` annotation
- Works with explicit reload annotations
- Works with `valueFrom.configMapKeyRef` references
- Works with `valueFrom.secretKeyRef` references
**Note:** Jobs don't support the EnvVars strategy.
### Argo Rollouts
Argo Rollouts are Kubernetes Deployments on steroids with advanced deployment strategies. Tests require Argo Rollouts to be installed.
**What's Tested**
- Reloads when ConfigMap data changes
- Reloads when Secret data changes
- Works with `auto=true` annotation
- Does NOT reload on label-only changes
- Default strategy (annotation-based, like Deployments)
- Restart strategy (sets `spec.restartAt` field instead of annotations)
- Supports all reference methods
- EnvVars strategy works
### DeploymentConfigs (OpenShift)
OpenShift's legacy workload type. Tests only run on OpenShift clusters.
**What's Tested**
- Reloads when ConfigMap data changes
- Reloads when Secret data changes
- Works with `auto=true` annotation
- Does NOT reload on label-only changes
- Supports all reference methods
- EnvVars strategy works
---
## CLI Flag Tests
These tests verify Reloader's command-line options work correctly. Each test deploys Reloader with different flags.
### Namespace Filtering
**`namespaceSelector`**
- Only watches namespaces with matching labels
- Ignores ConfigMap changes in non-matching namespaces
**`ignoreNamespaces`**
- Skips specified namespaces entirely
- Still watches all other namespaces
**`watchGlobally`**
- `true` (default): watches all namespaces
- `false`: only watches Reloader's own namespace
### Resource Filtering
**`resourceLabelSelector`**
- Only watches ConfigMaps/Secrets with matching labels
- Ignores changes to resources without the label
**`ignoreSecrets`**
- Completely ignores all Secret changes
- Still watches ConfigMaps
**`ignoreConfigMaps`**
- Completely ignores all ConfigMap changes
- Still watches Secrets
### Workload Filtering
**`ignoreCronJobs`**
- Skips CronJobs, still handles Deployments/etc
**`ignoreJobs`**
- Skips Jobs, still handles other workloads
### Reload Triggers
**`reloadOnCreate`**
- `true`: triggers reload when a new ConfigMap/Secret is created
- `false` (default): only triggers on updates
**`reloadOnDelete`**
- `true`: triggers reload when a ConfigMap/Secret is deleted
- `false` (default): only triggers on updates
### Global Auto-Reload
**`autoReloadAll`**
- `true`: all workloads auto-reload without needing annotations
- `auto=false` on a workload still opts it out
---
## Annotation-Specific Tests
### Auto Reload Variations
- `reloader.stakater.com/auto: "true"` - watches both ConfigMaps and Secrets
- `reloader.stakater.com/auto: "false"` - completely disables reload
- `configmap.reloader.stakater.com/auto: "true"` - only watches ConfigMaps
- `secret.reloader.stakater.com/auto: "true"` - only watches Secrets
### Combining Annotations
- `auto=true` + explicit reload annotation work together
- Auto-detected resources + explicitly listed resources both trigger reload
- Exclude annotations override auto-detection
### Search & Match
The search/match system lets you decouple workloads from specific resource names:
1. Workload has `reloader.stakater.com/search: "true"`
2. ConfigMap has `reloader.stakater.com/match: "true"`
3. When ConfigMap changes, workload reloads
**Tests verify:**
- Reload happens when both annotations present
- No reload when workload has search but ConfigMap lacks match
- No reload when ConfigMap has match but no workload has search
- Multiple workloads can have search, only ones with search reload
### Exclude Annotations
Exclude specific resources from auto-reload:
- `configmap.reloader.stakater.com/exclude: "config-to-skip"`
- `secret.reloader.stakater.com/exclude: "secret-to-skip"`
**Tests verify:**
- Excluded ConfigMap changes don't trigger reload
- Non-excluded ConfigMap changes still trigger reload
- Same behavior for Secrets
### Resource Ignore
Put this on the ConfigMap/Secret itself to prevent any reload:
- `reloader.stakater.com/ignore: "true"`
**Tests verify:**
- ConfigMap with ignore annotation never triggers reload
- Secret with ignore annotation never triggers reload
- Even with explicit reload annotation on workload
### Pause Period
Pause the workload's rollout for a period after a reload is triggered:
- `reloader.stakater.com/pause-period: "10s"`
**Tests verify:**
- Deployment gets paused-at annotation after reload
- Without pause-period, no paused-at annotation
---
## Advanced Scenarios
### Pod Template Annotations
Reloader reads annotations from both places:
1. Deployment/DaemonSet/etc metadata
2. Pod template metadata (inside spec.template.metadata)
**Tests verify:**
- Annotation only on pod template still works
- Annotation on both locations works
- Mismatched annotations (ConfigMap annotation but updating Secret) correctly doesn't reload
### Regex Patterns
Use regex in the reload annotation:
- `configmap.reloader.stakater.com/reload: "app-config-.*"`
- `secret.reloader.stakater.com/reload: "db-creds-.*"`
**Tests verify:**
- Matching ConfigMap/Secret triggers reload
- Non-matching ConfigMap/Secret doesn't trigger reload
### Multiple Containers
**Tests verify:**
- Multiple containers sharing one ConfigMap - changes trigger reload
- Multiple containers with different ConfigMaps - change to either triggers reload
---
## Test Organization
```
test/e2e/
├── core/ # Main tests (all workload types)
│ ├── workloads_test.go # Basic reload behavior
│ └── reference_methods_test.go # envFrom, volumes, etc.
├── annotations/ # Annotation-specific behavior
│ ├── auto_reload_test.go
│ ├── combination_test.go
│ ├── exclude_test.go
│ ├── search_match_test.go
│ ├── pause_period_test.go
│ └── resource_ignore_test.go
├── flags/ # CLI flag behavior
│ ├── namespace_selector_test.go
│ ├── namespace_ignore_test.go
│ ├── resource_selector_test.go
│ ├── ignore_resources_test.go
│ ├── ignored_workloads_test.go
│ ├── auto_reload_all_test.go
│ ├── reload_on_create_test.go
│ ├── reload_on_delete_test.go
│ └── watch_globally_test.go
├── advanced/ # Edge cases
│ ├── job_reload_test.go
│ ├── multi_container_test.go
│ ├── pod_annotations_test.go
│ └── regex_test.go
├── argo/ # Argo Rollouts (requires installation)
│ └── rollout_test.go
├── openshift/ # OpenShift (requires cluster)
│ └── deploymentconfig_test.go
└── utils/ # Shared test helpers
```
---
## Debugging Failed Tests
### See What's Happening
```bash
# Verbose output
go test -v ./test/e2e/core/...
# Run one specific test
go test -v ./test/e2e/core/... --ginkgo.focus="should reload when ConfigMap"
# Keep the cluster around after tests
SKIP_CLEANUP=true make e2e
```
### Check Reloader Logs
```bash
# Find the Reloader pod
kubectl get pods -A | grep reloader
# Check its logs
kubectl logs -n <namespace> -l app=reloader-reloader --tail=100
```
### Common Problems
| Problem | Solution |
|---------|----------|
| Test timeout | Reloader might not be running - check pod status |
| Argo tests skipped | Install Argo Rollouts first |
| OpenShift tests skipped | Only work on OpenShift clusters |
| "resource not found" | Missing CRDs (Argo, OpenShift) |
---
## Environment Variables
| Variable | What it does | Default |
|----------|--------------|---------|
| `RELOADER_IMAGE` | Image to test | `ghcr.io/stakater/reloader:test` |
| `SKIP_BUILD` | Don't build the image | `false` |
| `SKIP_CLEANUP` | Keep cluster after tests | `false` |
| `KIND_CLUSTER` | Kind cluster name | `kind` |
| `KUBECONFIG` | Kubernetes config path | `~/.kube/config` |
---
## Writing New Tests
### For Multiple Workload Types
Use the adapter pattern to test the same behavior across Deployments, DaemonSets, etc:
```go
DescribeTable("should reload when ConfigMap changes",
func(workloadType utils.WorkloadType) {
adapter := registry.Get(workloadType)
// ... create ConfigMap, workload, update ConfigMap, verify reload
},
Entry("Deployment", utils.WorkloadDeployment),
Entry("DaemonSet", utils.WorkloadDaemonSet),
Entry("StatefulSet", utils.WorkloadStatefulSet),
)
```
### For Deployment-Only Tests
Use the direct creation helpers:
```go
It("should reload with my specific setup", func() {
_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "value"}, nil)
_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
utils.WithConfigMapEnvFrom(configMapName),
utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
)
// Update and verify...
})
```
### Negative Tests (Verifying Nothing Happens)
```go
It("should NOT reload when only labels change", func() {
// Setup...
// Make a change that shouldn't trigger reload
err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"new-label": "value"})
// Wait a bit, then verify NO reload happened
time.Sleep(utils.NegativeTestWait)
reloaded, _ := utils.WaitForDeploymentReloaded(...)
Expect(reloaded).To(BeFalse())
})
```

View File

@@ -0,0 +1,51 @@
package advanced
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
"k8s.io/client-go/kubernetes"
)
// Shared state for every spec in this package, initialized once in
// BeforeSuite and torn down in AfterSuite.
var (
	kubeClient    kubernetes.Interface   // client for the cluster under test
	testNamespace string                 // namespace the suite's workloads run in
	ctx           context.Context        // root context for all API calls
	testEnv       *utils.TestEnvironment // harness owning setup/teardown
)
// TestAdvanced is the Go test entry point; it hands control to Ginkgo, which
// runs every spec registered in this package.
func TestAdvanced(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Advanced E2E Suite")
}
// BeforeSuite provisions the shared environment once for the whole package:
// it sets up the "reloader-advanced" test harness, publishes its client and
// namespace through the package-level vars, and deploys Reloader configured
// with the annotations reload strategy.
var _ = BeforeSuite(func() {
	var err error
	ctx = context.Background()
	// Setup test environment
	testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-advanced")
	Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment")
	// Export for use in tests
	kubeClient = testEnv.KubeClient
	testNamespace = testEnv.Namespace
	// Deploy Reloader with annotations strategy
	err = testEnv.DeployAndWait(map[string]string{
		"reloader.reloadStrategy": "annotations",
	})
	Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader")
})
// AfterSuite tears down everything BeforeSuite created; the nil check makes
// it a no-op when suite setup never completed.
var _ = AfterSuite(func() {
	if testEnv != nil {
		err := testEnv.Cleanup()
		Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment")
	}
	GinkgoWriter.Println("Advanced E2E Suite cleanup complete")
})

View File

@@ -0,0 +1,187 @@
package advanced
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Job Workload Recreation Tests exercise Reloader's special handling of Jobs:
// a running Job's pod template cannot be modified, so the Job is deleted and
// recreated when a referenced ConfigMap/Secret changes. Every spec asserts
// recreation by comparing the Job's UID before and after the change.
var _ = Describe("Job Workload Recreation Tests", func() {
	var (
		jobName       string
		configMapName string
		secretName    string
	)
	BeforeEach(func() {
		// Randomized names keep specs isolated within the shared namespace.
		jobName = utils.RandName("job")
		configMapName = utils.RandName("cm")
		secretName = utils.RandName("secret")
	})
	AfterEach(func() {
		// Best-effort cleanup; errors are ignored because not every spec
		// creates all three resources.
		_ = utils.DeleteJob(ctx, kubeClient, testNamespace, jobName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
	})
	// Explicit configmap reload annotation + envFrom reference.
	Context("Job with ConfigMap reference", func() {
		It("should recreate Job when referenced ConfigMap changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"JOB_CONFIG": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Job with ConfigMap envFrom")
			job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName,
				utils.WithJobConfigMapEnvFrom(configMapName),
				utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			originalUID := string(job.UID)
			By("Waiting for Job to exist")
			err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"JOB_CONFIG": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Job to be recreated (new UID)")
			_, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName,
				originalUID, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when ConfigMap changes")
		})
	})
	// Explicit secret reload annotation + envFrom reference.
	Context("Job with Secret reference", func() {
		It("should recreate Job when referenced Secret changes", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"JOB_SECRET": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Job with Secret envFrom")
			job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName,
				utils.WithJobSecretEnvFrom(secretName),
				utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			originalUID := string(job.UID)
			By("Waiting for Job to exist")
			err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"JOB_SECRET": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Job to be recreated (new UID)")
			_, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName,
				originalUID, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(recreated).To(BeTrue(), "Job should be recreated with new UID when Secret changes")
		})
	})
	// Auto-discovery via the auto=true annotation instead of an explicit list.
	Context("Job with auto annotation", func() {
		It("should recreate Job with auto=true when ConfigMap changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"AUTO_CONFIG": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Job with auto annotation")
			job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName,
				utils.WithJobConfigMapEnvFrom(configMapName),
				utils.WithJobAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			originalUID := string(job.UID)
			By("Waiting for Job to exist")
			err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"AUTO_CONFIG": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Job to be recreated (new UID)")
			_, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName,
				originalUID, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(recreated).To(BeTrue(), "Job with auto=true should be recreated when ConfigMap changes")
		})
	})
	// Single-key reference via valueFrom.configMapKeyRef rather than envFrom.
	Context("Job with valueFrom ConfigMap reference", func() {
		It("should recreate Job when ConfigMap referenced via valueFrom changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config_key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Job with valueFrom.configMapKeyRef")
			job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName,
				utils.WithJobConfigMapKeyRef(configMapName, "config_key", "MY_CONFIG"),
				utils.WithJobAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			originalUID := string(job.UID)
			By("Waiting for Job to exist")
			err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config_key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Job to be recreated (new UID)")
			_, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName,
				originalUID, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(recreated).To(BeTrue(), "Job with valueFrom.configMapKeyRef should be recreated when ConfigMap changes")
		})
	})
	// Single-key reference via valueFrom.secretKeyRef rather than envFrom.
	Context("Job with valueFrom Secret reference", func() {
		It("should recreate Job when Secret referenced via valueFrom changes", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret_key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Job with valueFrom.secretKeyRef")
			job, err := utils.CreateJob(ctx, kubeClient, testNamespace, jobName,
				utils.WithJobSecretKeyRef(secretName, "secret_key", "MY_SECRET"),
				utils.WithJobAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			originalUID := string(job.UID)
			By("Waiting for Job to exist")
			err = utils.WaitForJobExists(ctx, kubeClient, testNamespace, jobName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret_key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Job to be recreated (new UID)")
			_, recreated, err := utils.WaitForJobRecreated(ctx, kubeClient, testNamespace, jobName,
				originalUID, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(recreated).To(BeTrue(), "Job with valueFrom.secretKeyRef should be recreated when Secret changes")
		})
	})
})

View File

@@ -0,0 +1,94 @@
package advanced
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Multi-Container Tests cover Deployments whose pod spec contains more than
// one container, in two shapes: every container sharing a single ConfigMap,
// and each container wired to its own ConfigMap. In both shapes a change to
// a referenced ConfigMap must trigger a reload.
var _ = Describe("Multi-Container Tests", func() {
	var (
		deployName   string // Deployment under test, randomized per spec
		firstCMName  string // primary ConfigMap name
		secondCMName string // secondary ConfigMap name (per-container case)
	)

	// Randomized names keep specs isolated from each other and from leftovers
	// of earlier runs.
	BeforeEach(func() {
		deployName = utils.RandName("deploy")
		firstCMName = utils.RandName("cm")
		secondCMName = utils.RandName("cm2")
	})

	// Best-effort teardown: deletion errors are deliberately ignored, since a
	// spec that failed early may not have created every resource.
	AfterEach(func() {
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, secondCMName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, firstCMName)
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deployName)
	})

	Context("Multiple containers same ConfigMap", func() {
		It("should reload when ConfigMap used by multiple containers changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, firstCMName,
				map[string]string{"shared-key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())

			By("Creating a Deployment with multiple containers using the same ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deployName,
				utils.WithMultipleContainers(2),
				utils.WithConfigMapEnvFrom(firstCMName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for Deployment to be ready")
			Expect(utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deployName, utils.DeploymentReady)).
				To(Succeed())

			By("Updating the ConfigMap")
			Expect(utils.UpdateConfigMap(ctx, kubeClient, testNamespace, firstCMName,
				map[string]string{"shared-key": "updated"})).To(Succeed())

			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deployName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment with multiple containers should be reloaded")
		})
	})

	Context("Multiple containers different ConfigMaps", func() {
		It("should reload when any container's ConfigMap changes", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, firstCMName,
				map[string]string{"key1": "initial1"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, secondCMName,
				map[string]string{"key2": "initial2"}, nil)
			Expect(err).NotTo(HaveOccurred())

			By("Creating a Deployment with multiple containers using different ConfigMaps")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deployName,
				utils.WithMultipleContainersAndEnv(firstCMName, secondCMName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())

			By("Waiting for Deployment to be ready")
			Expect(utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deployName, utils.DeploymentReady)).
				To(Succeed())

			By("Updating the first ConfigMap")
			Expect(utils.UpdateConfigMap(ctx, kubeClient, testNamespace, firstCMName,
				map[string]string{"key1": "updated1"})).To(Succeed())

			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deployName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when first container's ConfigMap changes")
		})
	})
})

View File

@@ -0,0 +1,191 @@
package advanced
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Pod Template Annotations Tests verify that Reloader honors reload
// annotations placed on the pod template metadata (spec.template.metadata)
// instead of only on the Deployment's own metadata: template-only,
// template-plus-deployment, auto=true on the template, Secret annotations on
// the template, and a negative case where the annotated resource is not the
// one being updated.
var _ = Describe("Pod Template Annotations Tests", func() {
	var (
		deploymentName string // Deployment under test, randomized per spec
		configMapName  string // ConfigMap referenced/annotated by the specs
		secretName     string // Secret referenced/annotated by the specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		secretName = utils.RandName("secret")
	})
	// Best-effort cleanup; errors are ignored since a failed spec may not
	// have created every resource.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
	})
	Context("Annotations on pod template metadata only", func() {
		It("should reload when using annotation on pod template metadata (not deployment metadata)", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"POD_CONFIG": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with annotation ONLY on pod template")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
				// Note: No WithAnnotations - annotation only on pod template
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"POD_CONFIG": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			// Reload is detected via the last-reloaded-from marker annotation.
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when annotation is on pod template metadata")
		})
	})
	// Same annotation duplicated on both the Deployment and its pod template
	// must still result in exactly a normal reload.
	Context("Annotations on both deployment and pod template metadata", func() {
		It("should reload when annotations are on both deployment and pod template", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"BOTH_CONFIG": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with annotation on BOTH deployment and pod template")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
				utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"BOTH_CONFIG": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when annotations are on both locations")
		})
	})
	// auto=true (rather than an explicit resource list) placed on the pod
	// template must also be honored.
	Context("auto=true annotation on pod template", func() {
		It("should reload when auto annotation is on pod template metadata", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"AUTO_POD_CONFIG": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true annotation on pod template")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithPodTemplateAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"AUTO_POD_CONFIG": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment with auto=true on pod template should reload")
		})
	})
	// Secret-specific reload annotation on the pod template.
	Context("Secret annotation on pod template", func() {
		It("should reload when secret reload annotation is on pod template", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"POD_SECRET": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with secret reload annotation on pod template")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithPodTemplateAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"POD_SECRET": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when secret annotation is on pod template")
		})
	})
	// Negative case: the pod template's annotation names a ConfigMap, but the
	// resource actually updated is a Secret, so no reload should occur.
	Context("Mismatched annotations (different resources)", func() {
		It("should NOT reload when pod template has ConfigMap annotation but we update Secret", func() {
			By("Creating both ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"CONFIG": "value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"SECRET": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with ConfigMap annotation on pod template but using Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithPodTemplateAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret (not the ConfigMap)")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"SECRET": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (negative test)")
			// Give Reloader a window to (incorrectly) act, then poll only
			// briefly; the reload marker must still be absent.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when we update different resource than annotated")
		})
	})
})

View File

@@ -0,0 +1,134 @@
package advanced
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Regex Pattern Tests verify that reload annotations may carry a regular
// expression instead of a literal resource name: resources whose names match
// the pattern trigger a reload, non-matching ones do not. Resource names are
// deliberately prefixed ("app-config-", "app-secret-", "other-") so they do
// or do not match the "app-config-.*" / "app-secret-.*" patterns below.
var _ = Describe("Regex Pattern Tests", func() {
	var (
		deploymentName string // Deployment under test, randomized per spec
		matchingCM     string // ConfigMap whose name matches "app-config-.*"
		nonMatchingCM  string // ConfigMap whose name does NOT match the pattern
		matchingSecret string // Secret whose name matches "app-secret-.*"
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		matchingCM = "app-config-" + utils.RandName("cm")
		nonMatchingCM = "other-" + utils.RandName("cm")
		matchingSecret = "app-secret-" + utils.RandName("secret")
	})
	// Best-effort cleanup; errors ignored since a failed spec may not have
	// created every resource.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, matchingCM)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, matchingSecret)
	})
	Context("ConfigMap regex pattern", func() {
		It("should reload when ConfigMap matching pattern changes", func() {
			By("Creating a ConfigMap matching the pattern")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with ConfigMap pattern annotation")
			// The annotation value is a regex, not a literal ConfigMap name.
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(matchingCM),
				utils.WithAnnotations(map[string]string{
					utils.AnnotationConfigMapReload: "app-config-.*",
				}),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the matching ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, matchingCM,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching ConfigMap changes")
		})
		// Negative case: updating a ConfigMap outside the pattern must not
		// trigger a reload.
		It("should NOT reload when ConfigMap NOT matching pattern changes", func() {
			By("Creating ConfigMaps - one matching, one not")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, matchingCM,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM,
				map[string]string{"other": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with ConfigMap pattern annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(matchingCM),
				utils.WithAnnotations(map[string]string{
					utils.AnnotationConfigMapReload: "app-config-.*",
				}),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the non-matching ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, nonMatchingCM,
				map[string]string{"other": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (pattern mismatch)")
			// Give Reloader a window to (incorrectly) act, then poll only
			// briefly; the reload marker must still be absent.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when non-matching ConfigMap changes")
		})
	})
	Context("Secret regex pattern", func() {
		It("should reload when Secret matching pattern changes", func() {
			By("Creating a Secret matching the pattern")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, matchingSecret,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with Secret pattern annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(matchingSecret),
				utils.WithAnnotations(map[string]string{
					utils.AnnotationSecretReload: "app-secret-.*",
				}),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the matching Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, matchingSecret,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when matching Secret changes")
		})
	})
})

View File

@@ -0,0 +1,59 @@
package annotations
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// Suite-wide state shared by every spec in this package. All of these are
// assigned in BeforeSuite and released in AfterSuite.
var (
	kubeClient    kubernetes.Interface   // typed client used by the specs
	dynamicClient dynamic.Interface      // dynamic client for untyped/arbitrary resources
	testNamespace string                 // namespace that holds all test resources
	ctx           context.Context        // suite-scoped context, canceled at suite end
	cancel        context.CancelFunc     // cancels ctx
	testEnv       *utils.TestEnvironment // owns environment setup, Reloader deploy, and cleanup
)
// TestAnnotations is the `go test` entry point for this package: it hooks
// Gomega failures into Ginkgo and runs every spec registered here.
func TestAnnotations(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Annotations Strategy E2E Suite")
}
// BeforeSuite provisions the shared test environment once for the whole
// suite: a suite-scoped context, a test environment (clients + namespace),
// and a Reloader install configured with the "annotations" reload strategy.
var _ = BeforeSuite(func() {
	var err error
	ctx, cancel = context.WithCancel(context.Background())
	// Setup test environment
	testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-annotations-test")
	Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment")
	// Export for use in tests
	kubeClient = testEnv.KubeClient
	dynamicClient = testEnv.DynamicClient
	testNamespace = testEnv.Namespace
	// Deploy Reloader with annotations strategy
	err = testEnv.DeployAndWait(map[string]string{
		"reloader.reloadStrategy": "annotations",
	})
	Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader")
})
// AfterSuite tears down the shared test environment and releases the suite
// context created in BeforeSuite.
var _ = AfterSuite(func() {
	// Release the suite context via defer so it runs even when the cleanup
	// assertion below fails: a failed Expect panics out of this closure, and
	// without the defer, cancel() would be skipped and the context leaked.
	defer func() {
		if cancel != nil {
			cancel()
		}
	}()
	if testEnv != nil {
		err := testEnv.Cleanup()
		Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment")
	}
	GinkgoWriter.Println("Annotations E2E Suite cleanup complete")
})

View File

@@ -0,0 +1,269 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Auto Reload Annotation Tests cover the "auto" annotation family:
// reloader.stakater.com/auto true and false, the type-scoped
// configmap.reloader.stakater.com/auto and secret.reloader.stakater.com/auto
// variants, and auto combined with an explicit reload annotation.
var _ = Describe("Auto Reload Annotation Tests", func() {
	var (
		deploymentName string // Deployment under test, randomized per spec
		configMapName  string // ConfigMap referenced by the Deployment
		secretName     string // Secret referenced by the Deployment
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		secretName = utils.RandName("secret")
	})
	// Best-effort cleanup; errors are ignored since a failed spec may not
	// have created every resource.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
	})
	Context("with reloader.stakater.com/auto=true annotation", func() {
		It("should reload Deployment when any referenced ConfigMap changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap data")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			// Reload is detected via the last-reloaded-from marker annotation.
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded")
		})
		It("should reload Deployment when any referenced Secret changes", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret data")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for Secret change")
		})
		It("should reload Deployment when either ConfigMap or Secret changes", func() {
			By("Creating a ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true annotation referencing both")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			// Only the ConfigMap is updated here; the Secret stays unchanged.
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment with auto=true should have been reloaded for ConfigMap change")
		})
	})
	// Negative case: auto=false must suppress reloads entirely.
	Context("with reloader.stakater.com/auto=false annotation", func() {
		It("should NOT reload Deployment when ConfigMap changes", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=false annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoFalseAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap data")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment is NOT reloaded (negative test)")
			// Give Reloader a window to (incorrectly) act, then poll only
			// briefly; the reload marker must still be absent.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT have been reloaded")
		})
	})
	// Type-scoped auto: only ConfigMap changes should count. Note the spec
	// only exercises the positive (ConfigMap) half of that claim.
	Context("with configmap.reloader.stakater.com/auto=true annotation", func() {
		It("should reload Deployment only when ConfigMap changes, not Secret", func() {
			By("Creating a ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with configmap auto=true annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildConfigMapAutoAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for ConfigMap change")
		})
	})
	// Type-scoped auto for Secrets; again only the positive half is asserted.
	Context("with secret.reloader.stakater.com/auto=true annotation", func() {
		It("should reload Deployment only when Secret changes, not ConfigMap", func() {
			By("Creating a ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with secret auto=true annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildSecretAutoAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for Secret change")
		})
	})
	// auto=true plus an explicit reload annotation on a different ConfigMap:
	// the auto-detected (non-listed) ConfigMap must still trigger a reload.
	Context("with auto annotation and explicit reload annotation together", func() {
		It("should reload when auto-detected resource changes", func() {
			// This spec needs an extra ConfigMap beyond the shared fixtures,
			// so it is created and cleaned up locally.
			configMapName2 := utils.RandName("cm2")
			defer func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) }()
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key1": "value1"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"key2": "value2"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true and explicit reload for first ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithConfigMapEnvFrom(configMapName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapReloadAnnotation(configMapName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the second ConfigMap (auto-detected)")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"key2": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded for auto-detected ConfigMap change")
		})
	})
})

View File

@@ -0,0 +1,352 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Combination Annotation Tests exercise workloads that mix the auto
// annotation with explicit reload/exclude annotations, and workloads that
// list multiple resources (comma-separated) in a single reload annotation.
// All resources are created with per-spec random names in the shared test
// namespace and cleaned up best-effort in AfterEach.
var _ = Describe("Combination Annotation Tests", func() {
	var (
		deploymentName string // workload under test, unique per spec
		configMapName  string
		configMapName2 string
		secretName     string
		secretName2    string
	)
	BeforeEach(func() {
		// Fresh random names per spec keep specs isolated from each other.
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		configMapName2 = utils.RandName("cm2")
		secretName = utils.RandName("secret")
		secretName2 = utils.RandName("secret2")
	})
	AfterEach(func() {
		// Best-effort cleanup; not every spec creates every resource.
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2)
	})
	Context("auto=true with explicit reload annotations", func() {
		It("should reload when both auto-detected and explicitly listed ConfigMaps change", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"extra": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName), // auto-detected
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapReloadAnnotation(configMapName2), // explicitly listed
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the auto-detected ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when auto-detected ConfigMap changes")
		})
		It("should reload when explicitly listed ConfigMap changes with auto=true", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"extra": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND explicit reload annotation for extra ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName), // auto-detected
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapReloadAnnotation(configMapName2), // explicitly listed
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the explicitly listed ConfigMap (not mounted)")
			// The explicitly listed ConfigMap is NOT referenced by the pod
			// spec, so only the annotation can cause this reload.
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"extra": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed ConfigMap changes")
		})
		It("should reload when Secret changes with auto=true and explicit Secret annotation", func() {
			// Fixed step description: two Secrets are created here, not one.
			By("Creating two Secrets")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
				map[string]string{"api-key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND explicit reload annotation for extra Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName), // auto-detected
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildSecretReloadAnnotation(secretName2), // explicitly listed
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the explicitly listed Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
				map[string]string{"api-key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when explicitly listed Secret changes")
		})
	})
	Context("auto=true with exclude annotations", func() {
		It("should NOT reload when excluded ConfigMap changes", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"excluded": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND exclude for second ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithConfigMapEnvFrom(configMapName2), // also mounted, but excluded
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapExcludeAnnotation(configMapName2), // exclude this one
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the excluded ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"excluded": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (negative test)")
			// Give Reloader time to (incorrectly) act before asserting absence.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes")
		})
		It("should reload when non-excluded ConfigMap changes", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"excluded": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND exclude for second ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithConfigMapEnvFrom(configMapName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapExcludeAnnotation(configMapName2),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the non-excluded ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes")
		})
		It("should NOT reload when excluded Secret changes", func() {
			By("Creating two Secrets")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
				map[string]string{"excluded": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true AND exclude for second Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithSecretEnvFrom(secretName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildSecretExcludeAnnotation(secretName2),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the excluded Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
				map[string]string{"excluded": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (negative test)")
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded Secret changes")
		})
	})
	Context("multiple explicit references", func() {
		It("should reload when any of multiple explicitly listed ConfigMaps change", func() {
			By("Creating multiple ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key1": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"key2": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with multiple ConfigMaps in reload annotation (comma-separated)")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the second ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
				map[string]string{"key2": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed ConfigMaps changes")
		})
		It("should reload when any of multiple explicitly listed Secrets change", func() {
			By("Creating multiple Secrets")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"key1": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
				map[string]string{"key2": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with multiple Secrets in reload annotation (comma-separated)")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName, secretName2)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the first Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"key1": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when any of the listed Secrets changes")
		})
		It("should reload when both ConfigMap and Secret annotations are present", func() {
			By("Creating a ConfigMap and a Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with both ConfigMap and Secret reload annotations")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildConfigMapReloadAnnotation(configMapName),
					utils.BuildSecretReloadAnnotation(secretName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when Secret changes with both annotations present")
		})
	})
})

View File

@@ -0,0 +1,196 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Exclude Annotation Tests verify that the configmaps.exclude and
// secrets.exclude annotations suppress reloads for the listed resources while
// auto-detection still triggers reloads for every other mounted resource.
// Each spec runs in its own freshly created namespace.
var _ = Describe("Exclude Annotation Tests", func() {
	var (
		deploymentName string // workload under test, unique per spec
		configMapName  string
		configMapName2 string
		secretName     string
		secretName2    string
		excludeNS      string // per-spec namespace, created in BeforeEach
	)
	BeforeEach(func() {
		// Fresh random names per spec keep specs isolated from each other.
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		configMapName2 = utils.RandName("cm2")
		secretName = utils.RandName("secret")
		secretName2 = utils.RandName("secret2")
		excludeNS = "exclude-" + utils.RandName("ns")
		// Create test namespace
		err := utils.CreateNamespace(ctx, kubeClient, excludeNS)
		Expect(err).NotTo(HaveOccurred())
	})
	AfterEach(func() {
		// Best-effort cleanup; deleting the namespace removes anything left.
		_ = utils.DeleteDeployment(ctx, kubeClient, excludeNS, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, excludeNS, configMapName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, excludeNS, configMapName2)
		_ = utils.DeleteSecret(ctx, kubeClient, excludeNS, secretName)
		_ = utils.DeleteSecret(ctx, kubeClient, excludeNS, secretName2)
		_ = utils.DeleteNamespace(ctx, kubeClient, excludeNS)
	})
	Context("ConfigMap exclude annotation", func() {
		It("should NOT reload when excluded ConfigMap changes", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName2,
				map[string]string{"key2": "initial2"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true and configmaps.exclude annotation")
			// Both ConfigMaps are mounted; the first one is excluded.
			_, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithConfigMapEnvFrom(configMapName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapExcludeAnnotation(configMapName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the excluded ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, excludeNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (excluded ConfigMap)")
			// Give Reloader time to (incorrectly) act before asserting absence.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded ConfigMap changes")
		})
		It("should reload when non-excluded ConfigMap changes", func() {
			By("Creating two ConfigMaps")
			_, err := utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateConfigMap(ctx, kubeClient, excludeNS, configMapName2,
				map[string]string{"key2": "initial2"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true and configmaps.exclude annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithConfigMapEnvFrom(configMapName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildConfigMapExcludeAnnotation(configMapName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the non-excluded ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, excludeNS, configMapName2,
				map[string]string{"key2": "updated2"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded ConfigMap changes")
		})
	})
	Context("Secret exclude annotation", func() {
		It("should NOT reload when excluded Secret changes", func() {
			By("Creating two Secrets")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2,
				map[string]string{"password2": "initial2"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true and secrets.exclude annotation")
			// Both Secrets are mounted; the first one is excluded.
			_, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithSecretEnvFrom(secretName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildSecretExcludeAnnotation(secretName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the excluded Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, excludeNS, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (excluded Secret)")
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when excluded Secret changes")
		})
		It("should reload when non-excluded Secret changes", func() {
			By("Creating two Secrets")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2,
				map[string]string{"password2": "initial2"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=true and secrets.exclude annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, excludeNS, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithSecretEnvFrom(secretName2),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildAutoTrueAnnotation(),
					utils.BuildSecretExcludeAnnotation(secretName),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, excludeNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the non-excluded Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, excludeNS, secretName2,
				map[string]string{"password2": "updated2"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, excludeNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when non-excluded Secret changes")
		})
	})
})

View File

@@ -0,0 +1,102 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Pause Period Tests verify the pause-period annotation: after Reloader
// triggers a rollout for a workload carrying the annotation, the Deployment
// should be marked with a paused-at annotation; without the annotation no
// such marker may appear.
var _ = Describe("Pause Period Tests", func() {
	var (
		deploymentName string // workload under test, unique per spec
		configMapName  string
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
	})
	AfterEach(func() {
		// Best-effort cleanup so a failed spec does not leak resources.
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
	})
	Context("with pause-period annotation", func() {
		It("should pause Deployment after reload", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with pause-period annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.MergeAnnotations(
					utils.BuildConfigMapReloadAnnotation(configMapName),
					utils.BuildPausePeriodAnnotation("10s"),
				)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap data")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded")
			By("Verifying Deployment has paused-at annotation")
			// Pass the annotation-key constant itself, not its name quoted as
			// a string literal — the literal would never match the real key.
			paused, err := utils.WaitForDeploymentPaused(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationDeploymentPausedAt, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(paused).To(BeTrue(), "Deployment should have paused-at annotation after reload")
		})
		It("should NOT pause Deployment without pause-period annotation", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment WITHOUT pause-period annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap data")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded")
			By("Verifying Deployment does NOT have paused-at annotation")
			// Give Reloader time to (incorrectly) pause before asserting absence.
			time.Sleep(utils.NegativeTestWait)
			// Use the real annotation-key constant here too; the quoted
			// literal made this negative check vacuously pass.
			paused, err := utils.WaitForDeploymentPaused(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationDeploymentPausedAt, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(paused).To(BeFalse(), "Deployment should NOT have paused-at annotation without pause-period")
		})
	})
})

View File

@@ -0,0 +1,93 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Resource Ignore Annotation Tests verify that a ConfigMap or Secret carrying
// the reloader.stakater.com/ignore annotation never triggers a reload, even
// when a workload explicitly references it in a reload annotation.
var _ = Describe("Resource Ignore Annotation Tests", func() {
	var (
		deploymentName string // workload under test, unique per spec
		configMapName  string
		secretName     string
	)
	BeforeEach(func() {
		// Fresh random names per spec keep specs isolated from each other.
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		secretName = utils.RandName("secret")
	})
	AfterEach(func() {
		// Best-effort cleanup; not every spec creates every resource.
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
	})
	Context("with reloader.stakater.com/ignore annotation on resource", func() {
		It("should NOT reload when ConfigMap has ignore=true annotation", func() {
			By("Creating a ConfigMap with ignore=true annotation")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"},
				utils.BuildIgnoreAnnotation())
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with ConfigMap reference annotation")
			// The explicit reload annotation must NOT override ignore=true
			// on the resource itself.
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap data")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (negative test)")
			// Give Reloader time to (incorrectly) act before asserting absence.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap has ignore=true")
		})
		It("should NOT reload when Secret has ignore=true annotation", func() {
			By("Creating a Secret with ignore=true annotation")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "initial"},
				utils.BuildIgnoreAnnotation())
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with Secret reference annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret data")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (negative test)")
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when Secret has ignore=true")
		})
	})
})

View File

@@ -0,0 +1,169 @@
package annotations
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
var _ = Describe("Search and Match Annotation Tests", func() {
var (
deploymentName string
configMapName string
)
BeforeEach(func() {
deploymentName = utils.RandName("deploy")
configMapName = utils.RandName("cm")
})
AfterEach(func() {
_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
})
Context("with search and match annotations", func() {
It("should reload when workload has search annotation and ConfigMap has match annotation", func() {
By("Creating a ConfigMap with match annotation")
_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "initial"},
utils.BuildMatchAnnotation())
Expect(err).NotTo(HaveOccurred())
By("Creating a Deployment with search annotation")
_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
utils.WithConfigMapEnvFrom(configMapName),
utils.WithAnnotations(utils.BuildSearchAnnotation()),
)
Expect(err).NotTo(HaveOccurred())
By("Waiting for Deployment to be ready")
err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating the ConfigMap data")
err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "updated"})
Expect(err).NotTo(HaveOccurred())
By("Waiting for Deployment to be reloaded")
reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(reloaded).To(BeTrue(), "Deployment with search annotation should reload when ConfigMap has match annotation")
})
It("should NOT reload when workload has search but ConfigMap has no match", func() {
By("Creating a ConfigMap WITHOUT match annotation")
_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "initial"}, nil)
Expect(err).NotTo(HaveOccurred())
By("Creating a Deployment with search annotation")
_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
utils.WithConfigMapEnvFrom(configMapName),
utils.WithAnnotations(utils.BuildSearchAnnotation()),
)
Expect(err).NotTo(HaveOccurred())
By("Waiting for Deployment to be ready")
err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating the ConfigMap data")
err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "updated"})
Expect(err).NotTo(HaveOccurred())
By("Verifying Deployment was NOT reloaded (negative test)")
time.Sleep(utils.NegativeTestWait)
reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ConfigMap lacks match annotation")
})
// Negative case mirrored the other way: the ConfigMap carries the "match"
// annotation, but no workload has opted in with "search", so the update must
// not trigger any rollout.
It("should NOT reload when resource has match but no Deployment has search", func() {
	By("Creating a ConfigMap WITH match annotation")
	_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
		map[string]string{"key": "initial"},
		utils.BuildMatchAnnotation())
	Expect(err).NotTo(HaveOccurred())
	By("Creating a Deployment WITHOUT search annotation (only standard annotation)")
	_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
		utils.WithConfigMapEnvFrom(configMapName),
		// Note: No search or reload annotation - deployment won't be affected by match
	)
	Expect(err).NotTo(HaveOccurred())
	By("Waiting for Deployment to be ready")
	err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
	Expect(err).NotTo(HaveOccurred())
	By("Updating the ConfigMap data")
	err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
		map[string]string{"key": "updated"})
	Expect(err).NotTo(HaveOccurred())
	By("Verifying Deployment was NOT reloaded (negative test)")
	// Wait long enough that a wrong reload would have happened, then poll
	// briefly and expect no reload annotation to appear.
	time.Sleep(utils.NegativeTestWait)
	reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
		utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
	Expect(err).NotTo(HaveOccurred())
	Expect(reloaded).To(BeFalse(), "Deployment without search annotation should NOT reload even when ConfigMap has match")
})
// Shared-ConfigMap fan-out: of two Deployments consuming the same ConfigMap,
// only the one carrying the search annotation may be restarted; its sibling
// must remain untouched.
It("should reload only the deployment with search annotation when multiple deployments use same ConfigMap", func() {
	searchlessDeployment := utils.RandName("deploy2")
	defer func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, searchlessDeployment)
	}()

	By("Creating a ConfigMap with match annotation")
	_, createErr := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
		map[string]string{"key": "initial"},
		utils.BuildMatchAnnotation())
	Expect(createErr).To(Succeed())

	By("Creating first Deployment WITH search annotation")
	_, createErr = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
		utils.WithConfigMapEnvFrom(configMapName),
		utils.WithAnnotations(utils.BuildSearchAnnotation()),
	)
	Expect(createErr).To(Succeed())

	By("Creating second Deployment WITHOUT search annotation")
	// Deliberately created without any search/reload annotation.
	_, createErr = utils.CreateDeployment(ctx, kubeClient, testNamespace, searchlessDeployment,
		utils.WithConfigMapEnvFrom(configMapName),
	)
	Expect(createErr).To(Succeed())

	By("Waiting for both Deployments to be ready")
	Expect(utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)).To(Succeed())
	Expect(utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, searchlessDeployment, utils.DeploymentReady)).To(Succeed())

	By("Updating the ConfigMap data")
	Expect(utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
		map[string]string{"key": "updated"})).To(Succeed())

	By("Waiting for first Deployment to be reloaded")
	searchReloaded, waitErr := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
		utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
	Expect(waitErr).To(Succeed())
	Expect(searchReloaded).To(BeTrue(), "Deployment with search annotation should reload")

	By("Verifying second Deployment was NOT reloaded")
	otherReloaded, waitErr := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, searchlessDeployment,
		utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
	Expect(waitErr).To(Succeed())
	Expect(otherReloaded).To(BeFalse(), "Deployment without search annotation should NOT reload")
})
})
})

View File

@@ -0,0 +1,66 @@
package argo
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// Suite-wide handles shared by every spec in the argo package; they are
// populated once in BeforeSuite below.
var (
	kubeClient    kubernetes.Interface   // typed client for core/apps resources
	dynamicClient dynamic.Interface      // dynamic client used for Rollout CRs
	testNamespace string                 // namespace created for this suite
	ctx           context.Context        // root context for all suite operations
	testEnv       *utils.TestEnvironment // owns environment setup/teardown helpers
)
// TestArgo is the standard Go test entry point that hands control to Ginkgo.
func TestArgo(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Argo Rollouts E2E Suite")
}
// BeforeSuite provisions the shared environment for the Argo suite: it creates
// the test namespace and clients, verifies Argo Rollouts is present (skipping
// the suite otherwise), and deploys Reloader with Rollout support enabled.
var _ = BeforeSuite(func() {
	var err error
	ctx = context.Background()
	// Setup test environment
	testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-argo")
	Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment")
	// Export for use in tests
	kubeClient = testEnv.KubeClient
	dynamicClient = testEnv.DynamicClient
	testNamespace = testEnv.Namespace
	// Check if Argo Rollouts is installed
	// NOTE: Argo Rollouts should be pre-installed using: ./scripts/e2e-cluster-setup.sh
	// This suite does NOT install Argo Rollouts to ensure consistent behavior across all test suites.
	if !utils.IsArgoRolloutsInstalled(ctx, dynamicClient) {
		Skip("Argo Rollouts is not installed. Run ./scripts/e2e-cluster-setup.sh first")
	}
	GinkgoWriter.Println("Argo Rollouts is installed")
	// Deploy Reloader with Argo Rollouts support
	err = testEnv.DeployAndWait(map[string]string{
		"reloader.reloadStrategy": "annotations",
		"reloader.isArgoRollouts": "true",
	})
	Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader")
})
// AfterSuite tears down the Reloader deployment and the test namespace while
// deliberately leaving the cluster-wide Argo Rollouts installation in place.
var _ = AfterSuite(func() {
	// Cleanup test environment (Reloader + namespace)
	if testEnv != nil {
		err := testEnv.Cleanup()
		Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment")
	}
	// NOTE: Argo Rollouts is NOT uninstalled here to allow other test suites (core/)
	// to run Argo tests. Cleanup is handled by: ./scripts/e2e-cluster-cleanup.sh
	GinkgoWriter.Println("Argo Rollouts E2E Suite cleanup complete (Argo Rollouts preserved for other suites)")
})

View File

@@ -0,0 +1,91 @@
package argo
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Note: Basic Argo Rollout reload tests (ConfigMap, Secret, auto=true, volume mounts, label-only negative)
// are covered by core/workloads_test.go with Label("argo").
// This file contains only Argo-specific tests that cannot be parameterized.
// Argo-specific strategy behavior that cannot be expressed through the shared
// workload adapter tables in core/: the default annotation-based reload versus
// the Rollout-only "restart" strategy (spec.restartAt).
var _ = Describe("Argo Rollout Strategy Tests", func() {
	var (
		rolloutName   string // fresh Rollout name per spec
		configMapName string // fresh ConfigMap name per spec
	)
	BeforeEach(func() {
		rolloutName = utils.RandName("rollout")
		configMapName = utils.RandName("cm")
	})
	AfterEach(func() {
		// Best-effort cleanup; resources may already be gone if a spec failed early.
		_ = utils.DeleteArgoRollout(ctx, dynamicClient, testNamespace, rolloutName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
	})
	// Argo Rollouts have a special "restart" strategy that sets spec.restartAt field
	// instead of using pod template annotations. This is unique to Argo Rollouts.
	Context("Rollout strategy annotation", func() {
		It("should use default rollout strategy (annotation-based reload)", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating an Argo Rollout with auto=true (default strategy)")
			err = utils.CreateArgoRollout(ctx, dynamicClient, testNamespace, rolloutName,
				utils.WithRolloutConfigMapEnvFrom(configMapName),
				utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Rollout to be ready")
			err = utils.WaitForRolloutReady(ctx, dynamicClient, testNamespace, rolloutName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Rollout to be reloaded with annotation")
			// Default strategy: reload surfaces as the last-reloaded-from
			// annotation on the pod template, same as other workloads.
			reloaded, err := utils.WaitForRolloutReloaded(ctx, dynamicClient, testNamespace, rolloutName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Argo Rollout should be reloaded with default rollout strategy")
		})
		It("should use restart strategy when specified (sets restartAt field)", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating an Argo Rollout with restart strategy annotation")
			// Note: auto annotation goes on pod template, rollout-strategy goes on object metadata
			err = utils.CreateArgoRollout(ctx, dynamicClient, testNamespace, rolloutName,
				utils.WithRolloutConfigMapEnvFrom(configMapName),
				utils.WithRolloutAnnotations(utils.BuildAutoTrueAnnotation()),
				utils.WithRolloutObjectAnnotations(utils.BuildRolloutRestartStrategyAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Rollout to be ready")
			err = utils.WaitForRolloutReady(ctx, dynamicClient, testNamespace, rolloutName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Rollout to have restartAt field set")
			// Restart strategy: reload surfaces as spec.restartAt rather than
			// a pod template annotation.
			restarted, err := utils.WaitForRolloutRestartAt(ctx, dynamicClient, testNamespace, rolloutName, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(restarted).To(BeTrue(), "Argo Rollout should have restartAt field set with restart strategy")
		})
	})
})

View File

@@ -0,0 +1,89 @@
package core
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// Suite-wide handles shared by every spec in the core package; they are
// populated once in BeforeSuite below.
var (
	kubeClient    kubernetes.Interface   // typed client for core/apps resources
	dynamicClient dynamic.Interface      // dynamic client for CR-based workloads
	testNamespace string                 // namespace created for this suite
	ctx           context.Context        // root context for all suite operations
	cancel        context.CancelFunc     // cancels ctx in AfterSuite
	testEnv       *utils.TestEnvironment // owns environment setup/teardown helpers
	registry      *utils.AdapterRegistry // maps workload types to their adapters
)
// TestCore is the standard Go test entry point that hands control to Ginkgo.
func TestCore(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Core Workload E2E Suite")
}
// BeforeSuite provisions the shared e2e environment once for the Core suite:
// it creates the test namespace and clients, registers optional workload
// adapters (Argo Rollouts, OpenShift DeploymentConfig) based on cluster
// capabilities, and deploys Reloader with the default "annotations" strategy.
var _ = BeforeSuite(func() {
	var err error
	ctx, cancel = context.WithCancel(context.Background())
	// Setup test environment
	testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-core-test")
	Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment")
	// Export for use in tests
	kubeClient = testEnv.KubeClient
	dynamicClient = testEnv.DynamicClient
	testNamespace = testEnv.Namespace
	// Create adapter registry
	registry = utils.NewAdapterRegistry(kubeClient, dynamicClient)
	// Probe for Argo Rollouts once (it is a live API-discovery call) and reuse
	// the result for both adapter registration and the Reloader deploy values.
	argoInstalled := utils.IsArgoRolloutsInstalled(ctx, dynamicClient)
	// Register ArgoRolloutAdapter if Argo Rollouts is installed
	if argoInstalled {
		GinkgoWriter.Println("Argo Rollouts detected, registering ArgoRolloutAdapter")
		registry.RegisterAdapter(utils.NewArgoRolloutAdapter(dynamicClient))
	} else {
		GinkgoWriter.Println("Argo Rollouts not detected, skipping ArgoRolloutAdapter registration")
	}
	// Register DeploymentConfigAdapter if OpenShift is available
	if utils.HasDeploymentConfigSupport(testEnv.DiscoveryClient) {
		GinkgoWriter.Println("OpenShift detected, registering DeploymentConfigAdapter")
		registry.RegisterAdapter(utils.NewDeploymentConfigAdapter(dynamicClient))
	} else {
		GinkgoWriter.Println("OpenShift not detected, skipping DeploymentConfigAdapter registration")
	}
	// Deploy Reloader with default annotations strategy
	// Individual test contexts will redeploy with different strategies if needed
	deployValues := map[string]string{
		"reloader.reloadStrategy": "annotations",
	}
	// Enable Argo Rollouts support if Argo is installed
	if argoInstalled {
		deployValues["reloader.isArgoRollouts"] = "true"
		GinkgoWriter.Println("Deploying Reloader with Argo Rollouts support")
	}
	err = testEnv.DeployAndWait(deployValues)
	Expect(err).NotTo(HaveOccurred(), "Failed to deploy Reloader")
})
// AfterSuite tears down the shared environment (Reloader deployment and the
// test namespace) and then cancels the suite-level context.
var _ = AfterSuite(func() {
	if testEnv != nil {
		err := testEnv.Cleanup()
		Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment")
	}
	// Cancel last so Cleanup above can still use the suite context.
	if cancel != nil {
		cancel()
	}
	GinkgoWriter.Println("Core E2E Suite cleanup complete")
})

View File

@@ -0,0 +1,528 @@
package core
import (
"fmt"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
var _ = Describe("Reference Method Tests", func() {
var (
configMapName string
secretName string
workloadName string
)
BeforeEach(func() {
configMapName = utils.RandName("cm")
secretName = utils.RandName("secret")
workloadName = utils.RandName("workload")
})
AfterEach(func() {
_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
})
// ============================================================
// valueFrom.configMapKeyRef TESTS
// ============================================================
// A single ConfigMap key injected as an env var via valueFrom.configMapKeyRef
// must still trigger a reload for every supported workload kind.
Context("valueFrom.configMapKeyRef", func() {
	DescribeTable("should reload when ConfigMap referenced via valueFrom.configMapKeyRef changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional kinds (Argo Rollout, DeploymentConfig) are only
			// registered when their CRDs exist in the cluster.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config_key": "initial_value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with valueFrom.configMapKeyRef")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:      configMapName,
				UseConfigMapKeyRef: true,
				ConfigMapKey:       "config_key",
				EnvVarName:         "MY_CONFIG_VAR",
				Annotations:        utils.BuildConfigMapReloadAnnotation(configMapName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config_key": "updated_value"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with valueFrom.configMapKeyRef should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
// ============================================================
// valueFrom.secretKeyRef TESTS
// ============================================================
// A single Secret key injected as an env var via valueFrom.secretKeyRef
// must still trigger a reload for every supported workload kind.
Context("valueFrom.secretKeyRef", func() {
	DescribeTable("should reload when Secret referenced via valueFrom.secretKeyRef changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional kinds are only registered when their CRDs exist.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret_key": "initial_secret"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with valueFrom.secretKeyRef")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				SecretName:      secretName,
				UseSecretKeyRef: true,
				SecretKey:       "secret_key",
				EnvVarName:      "MY_SECRET_VAR",
				Annotations:     utils.BuildSecretReloadAnnotation(secretName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"secret_key": "updated_secret"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with valueFrom.secretKeyRef should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
// ============================================================
// PROJECTED VOLUME TESTS
// ============================================================
// Projected volumes bundle ConfigMaps/Secrets into one mounted volume;
// Reloader must detect a change to each projected source, including when a
// ConfigMap and a Secret share a single projected volume.
Context("Projected Volumes", func() {
	DescribeTable("should reload when ConfigMap in projected volume changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional kinds are only registered when their CRDs exist.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with projected ConfigMap volume")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:      configMapName,
				UseProjectedVolume: true,
				Annotations:        utils.BuildConfigMapReloadAnnotation(configMapName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with projected ConfigMap volume should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
	DescribeTable("should reload when Secret in projected volume changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with projected Secret volume")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				SecretName:         secretName,
				UseProjectedVolume: true,
				Annotations:        utils.BuildSecretReloadAnnotation(secretName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with projected Secret volume should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
	DescribeTable("should reload when ConfigMap changes in mixed projected volume",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with projected volume containing both")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:      configMapName,
				SecretName:         secretName,
				UseProjectedVolume: true,
				Annotations: utils.MergeAnnotations(
					utils.BuildConfigMapReloadAnnotation(configMapName),
					utils.BuildSecretReloadAnnotation(secretName),
				),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			// Only the ConfigMap half of the volume changes in this table.
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s should reload when ConfigMap in mixed projected volume changes", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
	DescribeTable("should reload when Secret changes in mixed projected volume",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap and Secret")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with projected volume containing both")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:      configMapName,
				SecretName:         secretName,
				UseProjectedVolume: true,
				Annotations: utils.MergeAnnotations(
					utils.BuildConfigMapReloadAnnotation(configMapName),
					utils.BuildSecretReloadAnnotation(secretName),
				),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			// Only the Secret half of the volume changes in this table.
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s should reload when Secret in mixed projected volume changes", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
// ============================================================
// INIT CONTAINER TESTS
// ============================================================
// References made only by init containers (via envFrom) must be tracked the
// same way as references in regular containers.
Context("Init Container with envFrom", func() {
	DescribeTable("should reload when ConfigMap referenced by init container changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional kinds are only registered when their CRDs exist.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"INIT_VAR": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with init container referencing ConfigMap")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:    configMapName,
				UseInitContainer: true,
				Annotations:      utils.BuildConfigMapReloadAnnotation(configMapName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"INIT_VAR": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
	DescribeTable("should reload when Secret referenced by init container changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"INIT_SECRET": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with init container referencing Secret")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				SecretName:       secretName,
				UseInitContainer: true,
				Annotations:      utils.BuildSecretReloadAnnotation(secretName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"INIT_SECRET": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with init container Secret should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
// Volumes mounted only in init containers must also be tracked as references.
Context("Init Container with Volume Mount", func() {
	DescribeTable("should reload when ConfigMap volume mounted in init container changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional kinds are only registered when their CRDs exist.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with init container using ConfigMap volume mount")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:          configMapName,
				UseInitContainerVolume: true,
				Annotations:            utils.BuildConfigMapReloadAnnotation(configMapName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"config.yaml": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with init container ConfigMap volume should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
	DescribeTable("should reload when Secret volume mounted in init container changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with init container using Secret volume mount")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				SecretName:             secretName,
				UseInitContainerVolume: true,
				Annotations:            utils.BuildSecretReloadAnnotation(secretName),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
				map[string]string{"credentials": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with init container Secret volume should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
// ============================================================
// AUTO ANNOTATION WITH VALUEFROM TESTS
// ============================================================
// Covers the interaction of reloader.stakater.com/auto=true with ConfigMaps
// that are referenced indirectly through env valueFrom/configMapKeyRef rather
// than envFrom: the key reference alone must be enough for auto-discovery.
Context("Auto Annotation with valueFrom", func() {
	DescribeTable("should reload with auto=true when ConfigMap referenced via valueFrom changes",
		func(workloadType utils.WorkloadType) {
			adapter := registry.Get(workloadType)
			// Optional adapters are only registered when their CRD/API exists.
			if adapter == nil {
				Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
			}
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"auto_config_key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating workload with auto=true and valueFrom")
			err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
				ConfigMapName:      configMapName,
				UseConfigMapKeyRef: true,
				ConfigMapKey:       "auto_config_key",
				EnvVarName:         "AUTO_CONFIG_VAR",
				Annotations:        utils.BuildAutoTrueAnnotation(),
			})
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
			By("Waiting for workload to be ready")
			err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"auto_config_key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for workload to be reloaded")
			reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "%s with auto=true and valueFrom should reload", workloadType)
		},
		Entry("Deployment", utils.WorkloadDeployment),
		Entry("DaemonSet", utils.WorkloadDaemonSet),
		Entry("StatefulSet", utils.WorkloadStatefulSet),
		Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
		Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
	)
})
})

View File

@@ -0,0 +1,912 @@
package core
import (
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
var _ = Describe("Workload Reload Tests", func() {
var (
	configMapName string // per-spec ConfigMap name, randomized in BeforeEach
	secretName    string // per-spec Secret name, randomized in BeforeEach
	workloadName  string // per-spec workload name, randomized in BeforeEach
)
// Generate fresh, randomized resource names before every spec so specs are
// isolated from each other and from leftovers of earlier runs.
BeforeEach(func() {
	configMapName = utils.RandName("cm")
	secretName = utils.RandName("secret")
	workloadName = utils.RandName("workload")
})
// Best-effort cleanup of the per-spec ConfigMap and Secret. Errors are
// deliberately ignored: a given spec may have created only one (or neither)
// of the two resources.
AfterEach(func() {
	_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
	_ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName)
})
// ============================================================
// ANNOTATIONS STRATEGY TESTS
// ============================================================
Context("Annotations Strategy", func() {
// Standard workloads that support annotation-based reload.
// NOTE(review): within this file the slice is only consumed by the
// `_ = standardWorkloads` blank assignment near the end of this Context
// (kept to avoid the unused-variable compile error); consider either wiring
// it into the tables below or deleting declaration and blank assignment
// together.
standardWorkloads := []utils.WorkloadType{
	utils.WorkloadDeployment,
	utils.WorkloadDaemonSet,
	utils.WorkloadStatefulSet,
}
// ConfigMap reload tests for standard workloads: an explicit
// configmap-reload annotation plus envFrom reference must trigger a rollout
// when the ConfigMap's data changes.
DescribeTable("should reload when ConfigMap changes",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with ConfigMap reference annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildConfigMapReloadAnnotation(configMapName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for workload to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Secret reload tests for standard workloads: mirror of the ConfigMap table
// above, using the secret-reload annotation and a Secret envFrom reference.
DescribeTable("should reload when Secret changes",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a Secret")
		_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"password": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with Secret reference annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			SecretName:       secretName,
			UseSecretEnvFrom: true,
			Annotations:      utils.BuildSecretReloadAnnotation(secretName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the Secret data")
		err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"password": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for workload to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "%s should have been reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Auto=true annotation tests: with reloader.stakater.com/auto=true the
// referenced ConfigMap is discovered from the envFrom reference, with no
// explicit configmap.reloader annotation needed.
DescribeTable("should reload with auto=true annotation when ConfigMap changes",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with auto=true annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildAutoTrueAnnotation(),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for workload to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "%s with auto=true should have been reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Negative tests: metadata-only (label) changes to a watched ConfigMap must
// NOT trigger a reload — only data changes count.
DescribeTable("should NOT reload when only ConfigMap labels change (no data change)",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with ConfigMap reference annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildConfigMapReloadAnnotation(configMapName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating only the ConfigMap labels (no data change)")
		err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"new-label": "new-value"})
		Expect(err).NotTo(HaveOccurred())
		By("Verifying workload was NOT reloaded (negative test)")
		// Give Reloader a window to (incorrectly) act, then do a short poll
		// and assert that no reload annotation appeared.
		time.Sleep(utils.NegativeTestWait)
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeFalse(), "%s should NOT reload when only ConfigMap labels change", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Negative test: label-only changes on a watched Secret must not reload.
DescribeTable("should NOT reload when only Secret labels change (no data change)",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a Secret")
		_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"password": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with Secret reference annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			SecretName:       secretName,
			UseSecretEnvFrom: true,
			Annotations:      utils.BuildSecretReloadAnnotation(secretName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating only the Secret labels (no data change)")
		err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"new-label": "new-value"})
		Expect(err).NotTo(HaveOccurred())
		By("Verifying workload was NOT reloaded (negative test)")
		// Allow time for a (wrong) reload to happen, then short-poll and
		// assert the reload annotation never appeared.
		time.Sleep(utils.NegativeTestWait)
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeFalse(), "%s should NOT reload when only Secret labels change", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// CronJob special handling - triggers a Job instead of annotation.
// For CronJobs a reload does not patch the pod template; these specs assert
// that Reloader creates a Job, observed via WaitForTriggeredJob.
Context("CronJob (special handling)", func() {
	var cronJobAdapter *utils.CronJobAdapter
	BeforeEach(func() {
		// The registry hands back a generic adapter; downcast to reach the
		// CronJob-specific WaitForTriggeredJob helper.
		adapter := registry.Get(utils.WorkloadCronJob)
		Expect(adapter).NotTo(BeNil())
		var ok bool
		cronJobAdapter, ok = adapter.(*utils.CronJobAdapter)
		Expect(ok).To(BeTrue(), "Should be able to cast to CronJobAdapter")
	})
	It("should trigger a Job when ConfigMap changes", func() {
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a CronJob with ConfigMap reference annotation")
		err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildConfigMapReloadAnnotation(configMapName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) })
		// NOTE(review): unlike the other workload specs there is no WaitReady
		// step before the update here — presumably a CronJob has no pods to
		// wait for until it fires; confirm this is intentional.
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for a Job to be created by CronJob reload")
		triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation")
	})
	It("should trigger a Job when Secret changes", func() {
		By("Creating a Secret")
		_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"password": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a CronJob with Secret reference annotation")
		err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			SecretName:       secretName,
			UseSecretEnvFrom: true,
			Annotations:      utils.BuildSecretReloadAnnotation(secretName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) })
		By("Updating the Secret data")
		err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"password": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for a Job to be created by CronJob reload")
		triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(triggered).To(BeTrue(), "CronJob should have triggered a Job creation")
	})
	It("should trigger a Job with auto=true annotation when ConfigMap changes", func() {
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a CronJob with auto=true annotation")
		err = cronJobAdapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildAutoTrueAnnotation(),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = cronJobAdapter.Delete(ctx, testNamespace, workloadName) })
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for a Job to be created by CronJob reload")
		triggered, err := cronJobAdapter.WaitForTriggeredJob(ctx, testNamespace, workloadName, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(triggered).To(BeTrue(), "CronJob with auto=true should have triggered a Job creation")
	})
})
// Volume mount tests: a ConfigMap consumed as a volume (rather than env)
// must still trigger a reload when its data changes.
DescribeTable("should reload when volume-mounted ConfigMap changes",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"config.yaml": "setting: initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with ConfigMap volume")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:      configMapName,
			UseConfigMapVolume: true,
			Annotations:        utils.BuildConfigMapReloadAnnotation(configMapName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"config.yaml": "setting: updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for workload to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "%s with volume-mounted ConfigMap should have been reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Mirror of the ConfigMap-volume table above for volume-mounted Secrets.
DescribeTable("should reload when volume-mounted Secret changes",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a Secret")
		_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"credentials.yaml": "secret: initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload with Secret volume")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			SecretName:      secretName,
			UseSecretVolume: true,
			Annotations:     utils.BuildSecretReloadAnnotation(secretName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the Secret data")
		err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"credentials.yaml": "secret: updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for workload to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "%s with volume-mounted Secret should have been reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
	Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
	Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Test for workloads without Reloader annotation: a data change on a
// referenced ConfigMap must be ignored when the workload carries no
// Reloader annotations at all.
DescribeTable("should NOT reload without Reloader annotation",
	func(workloadType utils.WorkloadType) {
		adapter := registry.Get(workloadType)
		// Optional adapters are only registered when their CRD/API exists.
		if adapter == nil {
			Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType))
		}
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "value"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating workload WITHOUT Reloader annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			// No Reloader annotations
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for workload to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Verifying workload is NOT reloaded (negative test)")
		// Give Reloader a window to (incorrectly) act, then short-poll and
		// assert the reload annotation never appeared.
		time.Sleep(utils.NegativeTestWait)
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeFalse(), "%s without Reloader annotation should NOT be reloaded", workloadType)
	},
	Entry("Deployment", utils.WorkloadDeployment),
	Entry("DaemonSet", utils.WorkloadDaemonSet),
	Entry("StatefulSet", utils.WorkloadStatefulSet),
)
// Blank assignment that keeps the otherwise-unused standardWorkloads slice
// compiling (unused locals are a compile error in Go).
// NOTE(review): remove this together with the declaration near the top of
// this Context if the slice is never wired into the tables.
_ = standardWorkloads
// ============================================================
// EDGE CASE TESTS (Deployment-specific)
// ============================================================
// Deployment-only edge cases: multiple watched resources, sequential
// updates, mixed ConfigMap+Secret references, and the auto=false opt-out.
// Consistency fix: the extra ConfigMap/Secret created inside the first two
// specs are now cleaned up via DeferCleanup, matching every other cleanup
// in this file, instead of ad-hoc `defer func(){...}()` closures.
Context("Edge Cases", func() {
	It("should reload deployment with multiple ConfigMaps when any one changes", func() {
		configMapName2 := utils.RandName("cm2")
		DeferCleanup(func() { _ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName2) })
		adapter := registry.Get(utils.WorkloadDeployment)
		Expect(adapter).NotTo(BeNil())
		By("Creating two ConfigMaps")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key1": "value1"}, nil)
		Expect(err).NotTo(HaveOccurred())
		_, err = utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
			map[string]string{"key2": "value2"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a Deployment referencing both ConfigMaps")
		// Only the first ConfigMap is mounted via envFrom; the reload
		// annotation lists both names.
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildConfigMapReloadAnnotation(configMapName, configMapName2),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for Deployment to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the second ConfigMap")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName2,
			map[string]string{"key2": "updated-value2"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for Deployment to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when second ConfigMap changed")
	})
	It("should reload deployment with multiple Secrets when any one changes", func() {
		secretName2 := utils.RandName("secret2")
		DeferCleanup(func() { _ = utils.DeleteSecret(ctx, kubeClient, testNamespace, secretName2) })
		adapter := registry.Get(utils.WorkloadDeployment)
		Expect(adapter).NotTo(BeNil())
		By("Creating two Secrets")
		_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"key1": "value1"}, nil)
		Expect(err).NotTo(HaveOccurred())
		_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
			map[string]string{"key2": "value2"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a Deployment referencing both Secrets")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			SecretName:       secretName,
			UseSecretEnvFrom: true,
			Annotations:      utils.BuildSecretReloadAnnotation(secretName, secretName2),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for Deployment to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the second Secret")
		err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName2,
			map[string]string{"key2": "updated-value2"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for Deployment to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when second Secret changed")
	})
	It("should reload deployment multiple times for sequential ConfigMap updates", func() {
		adapter := registry.Get(utils.WorkloadDeployment)
		Expect(adapter).NotTo(BeNil())
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "v1"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a Deployment with ConfigMap reference annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildConfigMapReloadAnnotation(configMapName),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for Deployment to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("First update to ConfigMap")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "v2"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for first reload")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue())
		By("Getting first reload annotation value")
		// Capture the annotation stamped by the first reload so the second
		// reload can be detected as a *change* of that value.
		deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, workloadName)
		Expect(err).NotTo(HaveOccurred())
		firstReloadValue := deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom]
		By("Second update to ConfigMap")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "v3"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for second reload with different annotation value")
		Eventually(func() string {
			deploy, err := utils.GetDeployment(ctx, kubeClient, testNamespace, workloadName)
			if err != nil {
				return ""
			}
			return deploy.Spec.Template.Annotations[utils.AnnotationLastReloadedFrom]
		}, utils.ReloadTimeout, utils.DefaultInterval).ShouldNot(
			Equal(firstReloadValue),
			"Reload annotation should change after second update",
		)
	})
	It("should reload deployment when either ConfigMap or Secret changes", func() {
		adapter := registry.Get(utils.WorkloadDeployment)
		Expect(adapter).NotTo(BeNil())
		By("Creating a ConfigMap and Secret")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"config": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		_, err = utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"secret": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a Deployment referencing both")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			SecretName:          secretName,
			UseConfigMapEnvFrom: true,
			UseSecretEnvFrom:    true,
			Annotations: utils.MergeAnnotations(
				utils.BuildConfigMapReloadAnnotation(configMapName),
				utils.BuildSecretReloadAnnotation(secretName),
			),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for Deployment to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the Secret")
		err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
			map[string]string{"secret": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Waiting for Deployment to be reloaded")
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeTrue(), "Deployment should have been reloaded when Secret changed")
	})
	It("should NOT reload deployment with auto=false annotation", func() {
		adapter := registry.Get(utils.WorkloadDeployment)
		Expect(adapter).NotTo(BeNil())
		By("Creating a ConfigMap")
		_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "initial"}, nil)
		Expect(err).NotTo(HaveOccurred())
		By("Creating a Deployment with auto=false annotation")
		err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
			ConfigMapName:       configMapName,
			UseConfigMapEnvFrom: true,
			Annotations:         utils.BuildAutoFalseAnnotation(),
		})
		Expect(err).NotTo(HaveOccurred())
		DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
		By("Waiting for Deployment to be ready")
		err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
		Expect(err).NotTo(HaveOccurred())
		By("Updating the ConfigMap data")
		err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
			map[string]string{"key": "updated"})
		Expect(err).NotTo(HaveOccurred())
		By("Verifying Deployment is NOT reloaded (auto=false)")
		// Negative assertion: wait, then short-poll for an annotation that
		// must never appear.
		time.Sleep(utils.NegativeTestWait)
		reloaded, err := adapter.WaitReloaded(ctx, testNamespace, workloadName,
			utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
		Expect(err).NotTo(HaveOccurred())
		Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT have been reloaded")
	})
})
})
// ============================================================
// ENVVARS STRATEGY TESTS
// ============================================================
Context("EnvVars Strategy", Label("envvars"), Ordered, func() {
// Redeploy Reloader with envvars strategy for this context.
// Runs once before all specs in this Ordered context; the matching AfterAll
// restores the annotations strategy afterwards.
BeforeAll(func() {
	By("Redeploying Reloader with envvars strategy")
	deployValues := map[string]string{
		"reloader.reloadStrategy": "env-vars",
	}
	// Preserve Argo support if available
	if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) {
		deployValues["reloader.isArgoRollouts"] = "true"
	}
	err := testEnv.DeployAndWait(deployValues)
	Expect(err).NotTo(HaveOccurred(), "Failed to redeploy Reloader with envvars strategy")
})
// Restore the default annotations strategy after this Ordered context so
// later contexts see Reloader in its standard configuration.
AfterAll(func() {
	By("Restoring Reloader to annotations strategy")
	deployValues := map[string]string{
		"reloader.reloadStrategy": "annotations",
	}
	// Preserve Argo support if available
	if utils.IsArgoRolloutsInstalled(ctx, dynamicClient) {
		deployValues["reloader.isArgoRollouts"] = "true"
	}
	err := testEnv.DeployAndWait(deployValues)
	Expect(err).NotTo(HaveOccurred(), "Failed to restore Reloader to annotations strategy")
})
// EnvVar workloads (CronJob does NOT support env var strategy)
envVarWorkloads := []utils.WorkloadType{
utils.WorkloadDeployment,
utils.WorkloadDaemonSet,
utils.WorkloadStatefulSet,
}
DescribeTable("should add STAKATER_ env var when ConfigMap changes",
func(workloadType utils.WorkloadType) {
adapter := registry.Get(workloadType)
if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) }
if !adapter.SupportsEnvVarStrategy() {
Skip("Workload type does not support env var strategy")
}
By("Creating a ConfigMap")
_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "initial"}, nil)
Expect(err).NotTo(HaveOccurred())
By("Creating workload with ConfigMap reference annotation")
err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
ConfigMapName: configMapName,
UseConfigMapEnvFrom: true,
Annotations: utils.BuildConfigMapReloadAnnotation(configMapName),
})
Expect(err).NotTo(HaveOccurred())
DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
By("Waiting for workload to be ready")
err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating the ConfigMap data")
err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "updated"})
Expect(err).NotTo(HaveOccurred())
By("Waiting for workload to have STAKATER_ env var")
found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName,
utils.StakaterEnvVarPrefix, utils.ReloadTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after ConfigMap change", workloadType)
},
Entry("Deployment", utils.WorkloadDeployment),
Entry("DaemonSet", utils.WorkloadDaemonSet),
Entry("StatefulSet", utils.WorkloadStatefulSet),
Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
DescribeTable("should add STAKATER_ env var when Secret changes",
func(workloadType utils.WorkloadType) {
adapter := registry.Get(workloadType)
if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) }
if !adapter.SupportsEnvVarStrategy() {
Skip("Workload type does not support env var strategy")
}
By("Creating a Secret")
_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
map[string]string{"password": "initial"}, nil)
Expect(err).NotTo(HaveOccurred())
By("Creating workload with Secret reference annotation")
err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
SecretName: secretName,
UseSecretEnvFrom: true,
Annotations: utils.BuildSecretReloadAnnotation(secretName),
})
Expect(err).NotTo(HaveOccurred())
DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
By("Waiting for workload to be ready")
err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating the Secret data")
err = utils.UpdateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
map[string]string{"password": "updated"})
Expect(err).NotTo(HaveOccurred())
By("Waiting for workload to have STAKATER_ env var")
found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName,
utils.StakaterEnvVarPrefix, utils.ReloadTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeTrue(), "%s should have STAKATER_ env var after Secret change", workloadType)
},
Entry("Deployment", utils.WorkloadDeployment),
Entry("DaemonSet", utils.WorkloadDaemonSet),
Entry("StatefulSet", utils.WorkloadStatefulSet),
Entry("ArgoRollout", Label("argo"), utils.WorkloadArgoRollout),
Entry("DeploymentConfig", Label("openshift"), utils.WorkloadDeploymentConfig),
)
// Negative tests for env var strategy
DescribeTable("should NOT add STAKATER_ env var when only ConfigMap labels change",
func(workloadType utils.WorkloadType) {
adapter := registry.Get(workloadType)
if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) }
if !adapter.SupportsEnvVarStrategy() {
Skip("Workload type does not support env var strategy")
}
By("Creating a ConfigMap")
_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"key": "value"}, nil)
Expect(err).NotTo(HaveOccurred())
By("Creating workload with ConfigMap reference annotation")
err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
ConfigMapName: configMapName,
UseConfigMapEnvFrom: true,
Annotations: utils.BuildConfigMapReloadAnnotation(configMapName),
})
Expect(err).NotTo(HaveOccurred())
DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
By("Waiting for workload to be ready")
err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating only the ConfigMap labels")
err = utils.UpdateConfigMapLabels(ctx, kubeClient, testNamespace, configMapName,
map[string]string{"new-label": "new-value"})
Expect(err).NotTo(HaveOccurred())
By("Verifying workload does NOT have STAKATER_ env var")
time.Sleep(utils.NegativeTestWait)
found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName,
utils.StakaterEnvVarPrefix, utils.ShortTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType)
},
Entry("Deployment", utils.WorkloadDeployment),
Entry("DaemonSet", utils.WorkloadDaemonSet),
Entry("StatefulSet", utils.WorkloadStatefulSet),
)
DescribeTable("should NOT add STAKATER_ env var when only Secret labels change",
func(workloadType utils.WorkloadType) {
adapter := registry.Get(workloadType)
if adapter == nil { Skip(fmt.Sprintf("%s adapter not available (CRD not installed)", workloadType)) }
if !adapter.SupportsEnvVarStrategy() {
Skip("Workload type does not support env var strategy")
}
By("Creating a Secret")
_, err := utils.CreateSecretFromStrings(ctx, kubeClient, testNamespace, secretName,
map[string]string{"password": "value"}, nil)
Expect(err).NotTo(HaveOccurred())
By("Creating workload with Secret reference annotation")
err = adapter.Create(ctx, testNamespace, workloadName, utils.WorkloadConfig{
SecretName: secretName,
UseSecretEnvFrom: true,
Annotations: utils.BuildSecretReloadAnnotation(secretName),
})
Expect(err).NotTo(HaveOccurred())
DeferCleanup(func() { _ = adapter.Delete(ctx, testNamespace, workloadName) })
By("Waiting for workload to be ready")
err = adapter.WaitReady(ctx, testNamespace, workloadName, utils.DeploymentReady)
Expect(err).NotTo(HaveOccurred())
By("Updating only the Secret labels")
err = utils.UpdateSecretLabels(ctx, kubeClient, testNamespace, secretName,
map[string]string{"new-label": "new-value"})
Expect(err).NotTo(HaveOccurred())
By("Verifying workload does NOT have STAKATER_ env var")
time.Sleep(utils.NegativeTestWait)
found, err := adapter.WaitEnvVar(ctx, testNamespace, workloadName,
utils.StakaterEnvVarPrefix, utils.ShortTimeout)
Expect(err).NotTo(HaveOccurred())
Expect(found).To(BeFalse(), "%s should NOT have STAKATER_ env var for label-only change", workloadType)
},
Entry("Deployment", utils.WorkloadDeployment),
Entry("DaemonSet", utils.WorkloadDaemonSet),
Entry("StatefulSet", utils.WorkloadStatefulSet),
)
// Variable to track for use in lint
_ = envVarWorkloads
})
})

View File

@@ -0,0 +1,84 @@
package e2e
import (
"context"
"fmt"
"os"
"os/exec"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// Package-level state shared by every spec in the e2e suite; initialized in
// BeforeSuite and torn down in AfterSuite.
var (
	kubeClient kubernetes.Interface // typed client for the target cluster
	projectDir string               // repository root, used to run make targets
	testImage  string               // container image under test, built/loaded in BeforeSuite
	ctx        context.Context      // suite-scoped context, cancelled in AfterSuite
	cancel     context.CancelFunc   // cancels ctx during suite teardown
)
// TestE2E is the `go test` entry point that hands control to Ginkgo and runs
// the full Reloader E2E suite.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Reloader E2E Suite")
}
// BeforeSuite performs one-time setup for the whole suite: it (optionally)
// builds the Reloader container image, loads it into the Kind cluster, and
// creates a Kubernetes client that is verified against the live API server.
var _ = BeforeSuite(func() {
	var err error
	ctx, cancel = context.WithCancel(context.Background())
	// Get project directory
	projectDir, err = utils.GetProjectDir()
	Expect(err).NotTo(HaveOccurred(), "Failed to get project directory")
	// Get test image from environment or use default
	testImage = utils.GetTestImage()
	GinkgoWriter.Printf("Using test image: %s\n", testImage)
	GinkgoWriter.Printf("Project directory: %s\n", projectDir)
	// Build image if SKIP_BUILD is not set (set SKIP_BUILD=true to reuse a
	// previously built image and speed up local iteration).
	if os.Getenv("SKIP_BUILD") != "true" {
		GinkgoWriter.Println("Building Docker image...")
		cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", testImage))
		output, err := utils.Run(cmd)
		Expect(err).NotTo(HaveOccurred(), "Failed to build Docker image: %s", output)
		GinkgoWriter.Println("Docker image built successfully")
	} else {
		GinkgoWriter.Println("Skipping Docker build (SKIP_BUILD=true)")
	}
	// Load image to Kind cluster so the Reloader Deployment can run it
	// without pulling from a remote registry.
	GinkgoWriter.Println("Loading image to Kind cluster...")
	err = utils.LoadImageToKindCluster(testImage)
	Expect(err).NotTo(HaveOccurred(), "Failed to load image to Kind cluster")
	GinkgoWriter.Println("Image loaded to Kind cluster successfully")
	// Setup Kubernetes client from the active kubeconfig.
	kubeconfig := utils.GetKubeconfig()
	GinkgoWriter.Printf("Using kubeconfig: %s\n", kubeconfig)
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	Expect(err).NotTo(HaveOccurred(), "Failed to build config from kubeconfig")
	kubeClient, err = kubernetes.NewForConfig(config)
	Expect(err).NotTo(HaveOccurred(), "Failed to create Kubernetes client")
	// Verify cluster connectivity with a cheap, read-only call before any
	// spec starts creating resources.
	GinkgoWriter.Println("Verifying cluster connectivity...")
	_, err = kubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{Limit: 1})
	Expect(err).NotTo(HaveOccurred(), "Failed to connect to Kubernetes cluster")
	GinkgoWriter.Println("Cluster connectivity verified")
})
// AfterSuite cancels the suite-scoped context so any in-flight API calls or
// helpers observing ctx terminate promptly.
var _ = AfterSuite(func() {
	// cancel is nil only if BeforeSuite never ran (e.g. a setup panic).
	if cancel != nil {
		cancel()
	}
	GinkgoWriter.Println("E2E Suite cleanup complete")
})

View File

@@ -0,0 +1,106 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// "Auto Reload All Flag Tests" verifies the reloader.autoReloadAll Helm flag:
// with it enabled, Reloader reloads workloads that carry no Reloader
// annotations at all, while an explicit auto=false annotation still opts a
// workload out.
var _ = Describe("Auto Reload All Flag Tests", func() {
	var (
		deploymentName string // per-test Deployment name (randomized)
		configMapName  string // per-test ConfigMap name (randomized)
		autoNamespace  string // dedicated namespace for this Describe's specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		autoNamespace = "auto-" + utils.RandName("ns")
	})
	// Best-effort cleanup: errors are ignored because the resources may
	// already be gone once the namespace itself is deleted below.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, autoNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, autoNamespace, configMapName)
	})
	Context("with autoReloadAll=true flag", func() {
		// Each spec gets a fresh namespace and a Reloader release deployed
		// with autoReloadAll enabled.
		BeforeEach(func() {
			err := utils.CreateNamespace(ctx, kubeClient, autoNamespace)
			Expect(err).NotTo(HaveOccurred())
			err = deployReloaderWithFlags(map[string]string{
				"reloader.autoReloadAll": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, autoNamespace)
		})
		It("should reload workloads without any annotations when autoReloadAll is true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment WITHOUT any Reloader annotations")
			_, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, autoNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (autoReloadAll=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, autoNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment without annotations should reload when autoReloadAll=true")
		})
		It("should respect auto=false annotation even when autoReloadAll is true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, autoNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto=false annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, autoNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoFalseAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, autoNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, autoNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (auto=false overrides autoReloadAll)")
			// Negative check: wait a fixed window, then poll briefly — the
			// reload annotation must never appear.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, autoNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment with auto=false should NOT reload even with autoReloadAll=true")
		})
	})
})

View File

@@ -0,0 +1,71 @@
package flags
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
"k8s.io/client-go/kubernetes"
)
// Shared state for the flags suite, populated in BeforeSuite.
var (
	kubeClient    kubernetes.Interface   // client for the target cluster
	testNamespace string                 // namespace hosting the Reloader release
	ctx           context.Context        // suite-scoped context
	testEnv       *utils.TestEnvironment // test-environment helper (deploy/cleanup)
)
// TestFlags is the `go test` entry point for the flag-based E2E suite.
func TestFlags(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Flag-Based E2E Suite")
}
// BeforeSuite provisions the shared test environment (clients, namespace,
// deployment helpers) but deliberately does NOT install Reloader: each test
// in this suite deploys it with the specific flag configuration under test.
var _ = BeforeSuite(func() {
	var err error
	ctx = context.Background()
	// Setup test environment (but don't deploy Reloader - tests do that with specific flags)
	testEnv, err = utils.SetupTestEnvironment(ctx, "reloader-flags")
	Expect(err).NotTo(HaveOccurred(), "Failed to setup test environment")
	// Export for use in tests
	kubeClient = testEnv.KubeClient
	testNamespace = testEnv.Namespace
	// Note: Unlike other suites, we don't deploy Reloader here.
	// Each test deploys with specific flag configurations.
})
// AfterSuite tears down the shared test environment (namespace and any
// remaining Reloader release).
var _ = AfterSuite(func() {
	// testEnv is nil only if BeforeSuite failed before assigning it.
	if testEnv != nil {
		err := testEnv.Cleanup()
		Expect(err).NotTo(HaveOccurred(), "Failed to cleanup test environment")
	}
	GinkgoWriter.Println("Flags E2E Suite cleanup complete")
})
// deployReloaderWithFlags deploys Reloader with the specified Helm value
// overrides and waits for the rollout to complete.
//
// The "reloader.reloadStrategy" value defaults to "annotations" when the
// caller did not set it explicitly. The caller's map is never mutated: the
// overrides are copied into a fresh map before the default is applied, so a
// map shared between tests cannot be silently changed by this helper.
func deployReloaderWithFlags(values map[string]string) error {
	merged := make(map[string]string, len(values)+1)
	for k, v := range values {
		merged[k] = v
	}
	// Always include a reload strategy; default to annotations.
	if _, ok := merged["reloader.reloadStrategy"]; !ok {
		merged["reloader.reloadStrategy"] = "annotations"
	}
	return testEnv.DeployAndWait(merged)
}
// undeployReloader removes the Reloader installation.
// It uninstalls the release created by deployReloaderWithFlags so the next
// test can install Reloader with a different flag set.
func undeployReloader() error {
	return utils.UndeployReloader(testNamespace, testEnv.ReleaseName)
}
// waitForReloaderReady waits for the Reloader deployment to be ready.
// Tests call this after deployReloaderWithFlags before creating workloads.
func waitForReloaderReady() error {
	return testEnv.WaitForReloader()
}

View File

@@ -0,0 +1,193 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// "Ignore Resources Flag Tests" verifies the reloader.ignoreSecrets and
// reloader.ignoreConfigMaps Helm flags: each flag must suppress reloads for
// its resource kind while leaving the other kind fully functional.
var _ = Describe("Ignore Resources Flag Tests", func() {
	var (
		deploymentName string // per-test Deployment name (randomized)
		configMapName  string // per-test ConfigMap name (randomized)
		secretName     string // per-test Secret name (randomized)
		ignoreNS       string // dedicated namespace for this Describe's specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		secretName = utils.RandName("secret")
		ignoreNS = "ignore-" + utils.RandName("ns")
	})
	// Best-effort cleanup; errors ignored because the namespace teardown in
	// the Context-level AfterEach removes everything anyway.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName)
		_ = utils.DeleteSecret(ctx, kubeClient, ignoreNS, secretName)
	})
	Context("with ignoreSecrets=true flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with ignoreSecrets flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.ignoreSecrets": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS)
		})
		It("should NOT reload when Secret changes with ignoreSecrets=true", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation referencing the Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (ignoreSecrets=true)")
			// Negative check: fixed wait, then short poll — no reload expected.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreSecrets=true")
		})
		It("should still reload when ConfigMap changes with ignoreSecrets=true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation referencing the ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (ConfigMap should still work)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "ConfigMap changes should still trigger reload with ignoreSecrets=true")
		})
	})
	Context("with ignoreConfigMaps=true flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with ignoreConfigMaps flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.ignoreConfigMaps": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS)
		})
		It("should NOT reload when ConfigMap changes with ignoreConfigMaps=true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation referencing the ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (ignoreConfigMaps=true)")
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when ignoreConfigMaps=true")
		})
		It("should still reload when Secret changes with ignoreConfigMaps=true", func() {
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName,
				map[string]string{"password": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation referencing the Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
				utils.WithSecretEnvFrom(secretName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the Secret")
			err = utils.UpdateSecretFromStrings(ctx, kubeClient, ignoreNS, secretName,
				map[string]string{"password": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (Secret should still work)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Secret changes should still trigger reload with ignoreConfigMaps=true")
		})
	})
})

View File

@@ -0,0 +1,159 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// "Ignored Workloads Flag Tests" verifies the reloader.ignoreCronJobs (and
// ignoreJobs) Helm flags: CronJobs must be excluded from reloads while other
// workload kinds (e.g. Deployments) continue to reload normally.
var _ = Describe("Ignored Workloads Flag Tests", func() {
	var (
		cronJobName   string // per-test CronJob name (randomized)
		configMapName string // per-test ConfigMap name (randomized)
		ignoreNS      string // dedicated namespace for this Describe's specs
	)
	BeforeEach(func() {
		cronJobName = utils.RandName("cj")
		configMapName = utils.RandName("cm")
		ignoreNS = "ignore-wl-" + utils.RandName("ns")
	})
	// Best-effort cleanup; the Context-level namespace deletion is the real
	// teardown.
	AfterEach(func() {
		_ = utils.DeleteCronJob(ctx, kubeClient, ignoreNS, cronJobName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, ignoreNS, configMapName)
	})
	Context("with ignoreCronJobs=true flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with ignoreCronJobs flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.ignoreCronJobs": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS)
		})
		It("should NOT reload CronJobs when ignoreCronJobs=true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a CronJob with auto annotation referencing the ConfigMap")
			_, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName,
				utils.WithCronJobConfigMapEnvFrom(configMapName),
				utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying CronJob was NOT reloaded (ignoreCronJobs=true)")
			// Negative check: fixed wait, then short poll — no reload expected.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForCronJobReloaded(ctx, kubeClient, ignoreNS, cronJobName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true")
		})
		It("should still reload Deployments when ignoreCronJobs=true", func() {
			deploymentName := utils.RandName("deploy")
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation referencing the ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoreNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			// This Deployment is local to the spec, so clean it up here rather
			// than in the Describe-level AfterEach.
			defer func() {
				_ = utils.DeleteDeployment(ctx, kubeClient, ignoreNS, deploymentName)
			}()
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoreNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "updated-deploy"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (Deployment should still work)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoreNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should still reload with ignoreCronJobs=true")
		})
	})
	Context("with both ignoreCronJobs=true and ignoreJobs=true flags", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, ignoreNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with both ignore flags
			err = deployReloaderWithFlags(map[string]string{
				"reloader.ignoreCronJobs": "true",
				"reloader.ignoreJobs":     "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, ignoreNS)
		})
		It("should NOT reload CronJobs when both job flags are true", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a CronJob with auto annotation")
			_, err = utils.CreateCronJob(ctx, kubeClient, ignoreNS, cronJobName,
				utils.WithCronJobConfigMapEnvFrom(configMapName),
				utils.WithCronJobAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoreNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying CronJob was NOT reloaded")
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForCronJobReloaded(ctx, kubeClient, ignoreNS, cronJobName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "CronJob should NOT reload when ignoreCronJobs=true and ignoreJobs=true")
		})
	})
})

View File

@@ -0,0 +1,114 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// "Namespace Ignore Flag Tests" verifies the reloader.ignoreNamespaces Helm
// flag: changes in the ignored namespace must never trigger reloads, while an
// otherwise identical workload in a non-ignored namespace reloads normally.
var _ = Describe("Namespace Ignore Flag Tests", func() {
	var (
		deploymentName   string // per-test Deployment name, reused in both namespaces
		configMapName    string // per-test ConfigMap name, reused in both namespaces
		ignoredNamespace string // namespace Reloader is told to ignore
		watchedNamespace string // control namespace Reloader still watches
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		ignoredNamespace = "ignored-" + utils.RandName("ns")
		watchedNamespace = "watched-" + utils.RandName("ns")
	})
	// Best-effort cleanup in both namespaces; namespace deletion below is the
	// real teardown.
	AfterEach(func() {
		_ = utils.DeleteDeployment(ctx, kubeClient, ignoredNamespace, deploymentName)
		_ = utils.DeleteDeployment(ctx, kubeClient, watchedNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, ignoredNamespace, configMapName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, watchedNamespace, configMapName)
	})
	Context("with ignoreNamespaces flag", func() {
		// Create both namespaces, then deploy Reloader configured to ignore
		// exactly one of them.
		BeforeEach(func() {
			err := utils.CreateNamespace(ctx, kubeClient, ignoredNamespace)
			Expect(err).NotTo(HaveOccurred())
			err = utils.CreateNamespace(ctx, kubeClient, watchedNamespace)
			Expect(err).NotTo(HaveOccurred())
			err = deployReloaderWithFlags(map[string]string{
				"reloader.ignoreNamespaces": ignoredNamespace,
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, ignoredNamespace)
			_ = utils.DeleteNamespace(ctx, kubeClient, watchedNamespace)
		})
		It("should NOT reload in ignored namespace", func() {
			By("Creating a ConfigMap in the ignored namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in the ignored namespace")
			_, err = utils.CreateDeployment(ctx, kubeClient, ignoredNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, ignoredNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, ignoredNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (ignored namespace)")
			// Negative check: fixed wait, then short poll — no reload expected.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, ignoredNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment in ignored namespace should NOT be reloaded")
		})
		It("should reload in watched (non-ignored) namespace", func() {
			By("Creating a ConfigMap in the watched namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, watchedNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in the watched namespace")
			_, err = utils.CreateDeployment(ctx, kubeClient, watchedNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, watchedNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, watchedNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, watchedNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment in non-ignored namespace should be reloaded")
		})
	})
})

View File

@@ -0,0 +1,116 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Namespace Selector Flag Tests verify Reloader's namespaceSelector flag:
// with "reloader.namespaceSelector=env=test" only workloads in namespaces
// carrying the matching label should be reloaded on ConfigMap changes.
var _ = Describe("Namespace Selector Flag Tests", func() {
	var (
		deploymentName string // workload under test, randomized per spec
		configMapName  string // ConfigMap referenced by the workload
		matchingNS     string // namespace created with the env=test label
		nonMatchingNS  string // namespace created without the label
	)
	BeforeEach(func() {
		// Randomized names avoid collisions between repeated runs.
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		matchingNS = "match-" + utils.RandName("ns")
		nonMatchingNS = "nomatch-" + utils.RandName("ns")
	})
	AfterEach(func() {
		// Best-effort cleanup in both namespaces; errors are ignored because
		// each spec only creates resources in one of the two namespaces.
		_ = utils.DeleteDeployment(ctx, kubeClient, matchingNS, deploymentName)
		_ = utils.DeleteDeployment(ctx, kubeClient, nonMatchingNS, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, matchingNS, configMapName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, nonMatchingNS, configMapName)
	})
	Context("with namespaceSelector flag", func() {
		BeforeEach(func() {
			// The selector "env=test" matches only matchingNS, which is
			// created with exactly that label.
			err := utils.CreateNamespaceWithLabels(ctx, kubeClient, matchingNS,
				map[string]string{"env": "test"})
			Expect(err).NotTo(HaveOccurred())
			err = utils.CreateNamespace(ctx, kubeClient, nonMatchingNS)
			Expect(err).NotTo(HaveOccurred())
			err = deployReloaderWithFlags(map[string]string{
				"reloader.namespaceSelector": "env=test",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			// Remove the Reloader deployment and both test namespaces.
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, matchingNS)
			_ = utils.DeleteNamespace(ctx, kubeClient, nonMatchingNS)
		})
		It("should reload workloads in matching namespaces", func() {
			By("Creating a ConfigMap in matching namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, matchingNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in matching namespace with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, matchingNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, matchingNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, matchingNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			// A reload is detected via the last-reloaded-from annotation that
			// Reloader stamps on the workload.
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, matchingNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment in matching namespace should be reloaded")
		})
		It("should NOT reload workloads in non-matching namespaces", func() {
			By("Creating a ConfigMap in non-matching namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in non-matching namespace with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, nonMatchingNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, nonMatchingNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, nonMatchingNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (non-matching namespace)")
			// Negative check: sleep first to give Reloader a window in which a
			// (wrong) reload could have happened, then poll with a short timeout.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, nonMatchingNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment in non-matching namespace should NOT be reloaded")
		})
	})
})

View File

@@ -0,0 +1,143 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Reload On Create Flag Tests verify the reloadOnCreate flag: when enabled,
// creating a ConfigMap/Secret that a workload already references should
// trigger a reload; when disabled (the default), it should not.
var _ = Describe("Reload On Create Flag Tests", func() {
	var (
		deploymentName  string // workload under test, randomized per spec
		configMapName   string // ConfigMap referenced by the workload
		createNamespace string // dedicated namespace for these specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		createNamespace = "create-" + utils.RandName("ns")
	})
	AfterEach(func() {
		// Best-effort cleanup; the namespace itself is removed in the
		// Context-level AfterEach blocks below.
		_ = utils.DeleteDeployment(ctx, kubeClient, createNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, createNamespace, configMapName)
	})
	Context("with reloadOnCreate=true flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, createNamespace)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with reloadOnCreate flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.reloadOnCreate": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, createNamespace)
		})
		It("should reload when a new ConfigMap is created", func() {
			// Deliberately create the Deployment BEFORE the ConfigMap so that
			// the ConfigMap's creation is the event under test.
			By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet")
			_, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Creating the ConfigMap that the Deployment references")
			_, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName,
				map[string]string{"key": "value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (reloadOnCreate=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is created")
		})
		It("should reload when a new Secret is created", func() {
			secretName := utils.RandName("secret")
			// The Secret is spec-local, so clean it up here rather than in AfterEach.
			defer func() { _ = utils.DeleteSecret(ctx, kubeClient, createNamespace, secretName) }()
			By("Creating a Deployment with annotation for a Secret that doesn't exist yet")
			_, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Creating the Secret that the Deployment references")
			_, err = utils.CreateSecretFromStrings(ctx, kubeClient, createNamespace, secretName,
				map[string]string{"password": "secret"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (reloadOnCreate=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is created")
		})
	})
	Context("with reloadOnCreate=false (default)", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, createNamespace)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader without reloadOnCreate flag (default is false)
			err = deployReloaderWithFlags(map[string]string{})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, createNamespace)
		})
		It("should NOT reload when a new ConfigMap is created (default behavior)", func() {
			By("Creating a Deployment with annotation for a ConfigMap that doesn't exist yet")
			_, err := utils.CreateDeployment(ctx, kubeClient, createNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, createNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Creating the ConfigMap that the Deployment references")
			_, err = utils.CreateConfigMap(ctx, kubeClient, createNamespace, configMapName,
				map[string]string{"key": "value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (reloadOnCreate=false)")
			// Negative check: wait first, then poll briefly for the absence
			// of the reload annotation.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, createNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload on create when reloadOnCreate=false")
		})
	})
})

View File

@@ -0,0 +1,154 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Reload On Delete Flag Tests verify the reloadOnDelete flag: when enabled,
// deleting a referenced ConfigMap/Secret should trigger a reload; when
// disabled (the default), it should not.
var _ = Describe("Reload On Delete Flag Tests", func() {
	var (
		deploymentName  string // workload under test, randomized per spec
		configMapName   string // ConfigMap referenced by the workload
		deleteNamespace string // dedicated namespace for these specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		deleteNamespace = "delete-" + utils.RandName("ns")
	})
	AfterEach(func() {
		// Best-effort cleanup; the ConfigMap may already have been deleted by
		// the spec itself, which is why errors are ignored.
		_ = utils.DeleteDeployment(ctx, kubeClient, deleteNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName)
	})
	Context("with reloadOnDelete=true flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with reloadOnDelete flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.reloadOnDelete": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace)
		})
		It("should reload when a referenced ConfigMap is deleted", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName,
				map[string]string{"key": "value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with annotation for the ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Deleting the ConfigMap")
			// The deletion itself is the event under test.
			err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (reloadOnDelete=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced ConfigMap is deleted")
		})
		It("should reload when a referenced Secret is deleted", func() {
			secretName := utils.RandName("secret")
			By("Creating a Secret")
			_, err := utils.CreateSecretFromStrings(ctx, kubeClient, deleteNamespace, secretName,
				map[string]string{"password": "secret"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with annotation for the Secret")
			_, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildSecretReloadAnnotation(secretName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Deleting the Secret")
			err = utils.DeleteSecret(ctx, kubeClient, deleteNamespace, secretName)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (reloadOnDelete=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should reload when referenced Secret is deleted")
		})
	})
	Context("with reloadOnDelete=false (default)", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, deleteNamespace)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader without reloadOnDelete flag (default is false)
			err = deployReloaderWithFlags(map[string]string{})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, deleteNamespace)
		})
		It("should NOT reload when a referenced ConfigMap is deleted (default behavior)", func() {
			By("Creating a ConfigMap")
			_, err := utils.CreateConfigMap(ctx, kubeClient, deleteNamespace, configMapName,
				map[string]string{"key": "value"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with annotation for the ConfigMap")
			_, err = utils.CreateDeployment(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.WithAnnotations(utils.BuildConfigMapReloadAnnotation(configMapName)),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, deleteNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Deleting the ConfigMap")
			err = utils.DeleteConfigMap(ctx, kubeClient, deleteNamespace, configMapName)
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (reloadOnDelete=false)")
			// Negative check: wait first, then poll briefly for the absence
			// of the reload annotation.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, deleteNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload on delete when reloadOnDelete=false")
		})
	})
})

View File

@@ -0,0 +1,114 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Resource Label Selector Flag Tests verify the resourceLabelSelector flag:
// with "reload=true" configured, only ConfigMaps carrying that label should
// trigger reloads; unlabeled ConfigMaps should be ignored even when the
// workload opts in via the auto annotation.
var _ = Describe("Resource Label Selector Flag Tests", func() {
	var (
		deploymentName string // workload under test, randomized per spec
		matchingCM     string // ConfigMap created WITH the reload=true label
		nonMatchingCM  string // ConfigMap created WITHOUT the label
		resourceNS     string // dedicated namespace for these specs
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		matchingCM = utils.RandName("match-cm")
		nonMatchingCM = utils.RandName("nomatch-cm")
		resourceNS = "resource-" + utils.RandName("ns")
	})
	AfterEach(func() {
		// Best-effort cleanup; each spec only creates one of the two ConfigMaps.
		_ = utils.DeleteDeployment(ctx, kubeClient, resourceNS, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, matchingCM)
		_ = utils.DeleteConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM)
	})
	Context("with resourceLabelSelector flag", func() {
		BeforeEach(func() {
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, resourceNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with resourceLabelSelector flag
			err = deployReloaderWithFlags(map[string]string{
				"reloader.resourceLabelSelector": "reload=true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, resourceNS)
		})
		It("should reload when labeled ConfigMap changes", func() {
			By("Creating a ConfigMap with matching label")
			_, err := utils.CreateConfigMapWithLabels(ctx, kubeClient, resourceNS, matchingCM,
				map[string]string{"key": "initial"},
				map[string]string{"reload": "true"},
				nil) // no annotations
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName,
				utils.WithConfigMapEnvFrom(matchingCM),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, resourceNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the labeled ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, matchingCM,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, resourceNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment should be reloaded when labeled ConfigMap changes")
		})
		It("should NOT reload when unlabeled ConfigMap changes", func() {
			By("Creating a ConfigMap WITHOUT matching label")
			_, err := utils.CreateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, resourceNS, deploymentName,
				utils.WithConfigMapEnvFrom(nonMatchingCM),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, resourceNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the unlabeled ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, resourceNS, nonMatchingCM,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (unlabeled ConfigMap)")
			// Negative check: wait first, then poll briefly for the absence
			// of the reload annotation.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, resourceNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment should NOT reload when unlabeled ConfigMap changes")
		})
	})
})

View File

@@ -0,0 +1,170 @@
package flags
import (
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stakater/Reloader/test/e2e/utils"
)
// Watch Globally Flag Tests verify the watchGlobally flag: with
// watchGlobally=false Reloader should only act on resources in its own
// namespace (testNamespace); with watchGlobally=true (the default) it should
// act on resources in any namespace.
var _ = Describe("Watch Globally Flag Tests", func() {
	var (
		deploymentName string // workload under test, randomized per spec
		configMapName  string // ConfigMap referenced by the workload
		otherNS        string // namespace other than Reloader's own
	)
	BeforeEach(func() {
		deploymentName = utils.RandName("deploy")
		configMapName = utils.RandName("cm")
		otherNS = "other-" + utils.RandName("ns")
	})
	AfterEach(func() {
		// Clean up resources in both namespaces
		_ = utils.DeleteDeployment(ctx, kubeClient, testNamespace, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, testNamespace, configMapName)
		_ = utils.DeleteDeployment(ctx, kubeClient, otherNS, deploymentName)
		_ = utils.DeleteConfigMap(ctx, kubeClient, otherNS, configMapName)
	})
	Context("with watchGlobally=false flag", func() {
		BeforeEach(func() {
			// Create the other namespace for testing cross-namespace behavior
			err := utils.CreateNamespace(ctx, kubeClient, otherNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with watchGlobally=false
			// This makes Reloader only watch resources in its own namespace (testNamespace)
			err = deployReloaderWithFlags(map[string]string{
				"reloader.watchGlobally": "false",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, otherNS)
		})
		It("should reload workloads in Reloader's namespace when watchGlobally=false", func() {
			By("Creating a ConfigMap in Reloader's namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in Reloader's namespace with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, testNamespace, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, testNamespace, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, testNamespace, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (same namespace should work)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, testNamespace, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment in Reloader's namespace should reload with watchGlobally=false")
		})
		It("should NOT reload workloads in other namespaces when watchGlobally=false", func() {
			By("Creating a ConfigMap in another namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, otherNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in another namespace with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, otherNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, otherNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap in the other namespace")
			err = utils.UpdateConfigMap(ctx, kubeClient, otherNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Verifying Deployment was NOT reloaded (different namespace with watchGlobally=false)")
			// Negative check: wait first, then poll briefly for the absence
			// of the reload annotation.
			time.Sleep(utils.NegativeTestWait)
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, otherNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ShortTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeFalse(), "Deployment in other namespace should NOT reload with watchGlobally=false")
		})
	})
	Context("with watchGlobally=true flag (default)", func() {
		// globalNS is scoped to this Context so each spec gets a fresh
		// namespace that is not Reloader's own.
		var globalNS string
		BeforeEach(func() {
			globalNS = "global-" + utils.RandName("ns")
			// Create test namespace
			err := utils.CreateNamespace(ctx, kubeClient, globalNS)
			Expect(err).NotTo(HaveOccurred())
			// Deploy Reloader with watchGlobally=true (default)
			err = deployReloaderWithFlags(map[string]string{
				"reloader.watchGlobally": "true",
			})
			Expect(err).NotTo(HaveOccurred())
			err = waitForReloaderReady()
			Expect(err).NotTo(HaveOccurred())
		})
		AfterEach(func() {
			_ = utils.DeleteDeployment(ctx, kubeClient, globalNS, deploymentName)
			_ = utils.DeleteConfigMap(ctx, kubeClient, globalNS, configMapName)
			_ = undeployReloader()
			_ = utils.DeleteNamespace(ctx, kubeClient, globalNS)
		})
		It("should reload workloads in any namespace when watchGlobally=true", func() {
			By("Creating a ConfigMap in a different namespace")
			_, err := utils.CreateConfigMap(ctx, kubeClient, globalNS, configMapName,
				map[string]string{"key": "initial"}, nil)
			Expect(err).NotTo(HaveOccurred())
			By("Creating a Deployment in a different namespace with auto annotation")
			_, err = utils.CreateDeployment(ctx, kubeClient, globalNS, deploymentName,
				utils.WithConfigMapEnvFrom(configMapName),
				utils.WithAnnotations(utils.BuildAutoTrueAnnotation()),
			)
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be ready")
			err = utils.WaitForDeploymentReady(ctx, kubeClient, globalNS, deploymentName, utils.DeploymentReady)
			Expect(err).NotTo(HaveOccurred())
			By("Updating the ConfigMap")
			err = utils.UpdateConfigMap(ctx, kubeClient, globalNS, configMapName,
				map[string]string{"key": "updated"})
			Expect(err).NotTo(HaveOccurred())
			By("Waiting for Deployment to be reloaded (watchGlobally=true)")
			reloaded, err := utils.WaitForDeploymentReloaded(ctx, kubeClient, globalNS, deploymentName,
				utils.AnnotationLastReloadedFrom, utils.ReloadTimeout)
			Expect(err).NotTo(HaveOccurred())
			Expect(reloaded).To(BeTrue(), "Deployment in any namespace should reload with watchGlobally=true")
		})
	})
})

View File

@@ -0,0 +1,207 @@
package utils

import "strings"
// Annotation key constants used by Reloader.
// These follow the pattern: {scope}.reloader.stakater.com/{action}
// where scope can be empty (all resources), "configmap", "secret", "deployment", etc.
const (
	// ============================================================
	// Core reload annotations
	// ============================================================
	// AnnotationLastReloadedFrom is set by Reloader on workloads to track the last resource
	// that triggered a reload. Format: "{namespace}/{resource-type}/{resource-name}"
	AnnotationLastReloadedFrom = "reloader.stakater.com/last-reloaded-from"
	// AnnotationConfigMapReload triggers reload when specified ConfigMap(s) change.
	// Value: comma-separated list of ConfigMap names, e.g., "config1,config2"
	AnnotationConfigMapReload = "configmap.reloader.stakater.com/reload"
	// AnnotationSecretReload triggers reload when specified Secret(s) change.
	// Value: comma-separated list of Secret names, e.g., "secret1,secret2"
	AnnotationSecretReload = "secret.reloader.stakater.com/reload"
	// ============================================================
	// Auto-reload annotations
	// ============================================================
	// AnnotationAuto enables auto-reload for all referenced ConfigMaps and Secrets.
	// Value: "true" or "false"
	AnnotationAuto = "reloader.stakater.com/auto"
	// AnnotationConfigMapAuto enables auto-reload for all referenced ConfigMaps only.
	// Value: "true" or "false"
	AnnotationConfigMapAuto = "configmap.reloader.stakater.com/auto"
	// AnnotationSecretAuto enables auto-reload for all referenced Secrets only.
	// Value: "true" or "false"
	AnnotationSecretAuto = "secret.reloader.stakater.com/auto"
	// ============================================================
	// Exclude annotations (used with auto=true to exclude specific resources)
	// ============================================================
	// AnnotationConfigMapExclude excludes specified ConfigMaps from auto-reload.
	// Value: comma-separated list of ConfigMap names
	AnnotationConfigMapExclude = "configmaps.exclude.reloader.stakater.com/reload"
	// AnnotationSecretExclude excludes specified Secrets from auto-reload.
	// Value: comma-separated list of Secret names
	AnnotationSecretExclude = "secrets.exclude.reloader.stakater.com/reload"
	// ============================================================
	// Search annotations (for regex matching)
	// ============================================================
	// AnnotationSearch enables regex search mode for ConfigMap/Secret names.
	// Value: "true"
	// Used with reload annotation where value is a regex pattern.
	AnnotationSearch = "reloader.stakater.com/search"
	// AnnotationMatch is an alias for AnnotationSearch.
	// Value: "true"
	AnnotationMatch = "reloader.stakater.com/match"
	// ============================================================
	// Resource-level annotations (placed on ConfigMap/Secret)
	// ============================================================
	// AnnotationIgnore prevents Reloader from triggering reloads for this resource.
	// Place this on a ConfigMap or Secret to exclude it from reload triggers.
	// Value: "true"
	AnnotationIgnore = "reloader.stakater.com/ignore"
	// ============================================================
	// Pause/period annotations
	// ============================================================
	// AnnotationDeploymentPausePeriod sets a pause period before triggering reload.
	// Value: duration string, e.g., "10s", "1m"
	AnnotationDeploymentPausePeriod = "deployment.reloader.stakater.com/pause-period"
	// AnnotationDeploymentPausedAt is set by Reloader when a workload is paused.
	// Value: RFC3339 timestamp
	AnnotationDeploymentPausedAt = "deployment.reloader.stakater.com/paused-at"
	// ============================================================
	// Argo Rollouts specific annotations
	// ============================================================
	// AnnotationRolloutStrategy specifies the strategy for Argo Rollouts.
	// Value: "restart" (sets spec.restartAt)
	AnnotationRolloutStrategy = "reloader.stakater.com/rollout-strategy"
)

// Annotation values.
const (
	// AnnotationValueTrue is the string "true" for annotation values.
	AnnotationValueTrue = "true"
	// AnnotationValueFalse is the string "false" for annotation values.
	AnnotationValueFalse = "false"
	// AnnotationValueRestart is the "restart" strategy value for Argo Rollouts.
	AnnotationValueRestart = "restart"
)

// singleAnnotation returns a one-entry annotation map for the given key and
// value. All Build* helpers below delegate to it so the construction pattern
// lives in exactly one place.
func singleAnnotation(key, value string) map[string]string {
	return map[string]string{key: value}
}

// BuildConfigMapReloadAnnotation creates an annotation map for ConfigMap reload.
// Multiple names are joined into a comma-separated list.
func BuildConfigMapReloadAnnotation(configMapNames ...string) map[string]string {
	return singleAnnotation(AnnotationConfigMapReload, joinNames(configMapNames))
}

// BuildSecretReloadAnnotation creates an annotation map for Secret reload.
// Multiple names are joined into a comma-separated list.
func BuildSecretReloadAnnotation(secretNames ...string) map[string]string {
	return singleAnnotation(AnnotationSecretReload, joinNames(secretNames))
}

// BuildAutoTrueAnnotation creates an annotation map with auto=true.
func BuildAutoTrueAnnotation() map[string]string {
	return singleAnnotation(AnnotationAuto, AnnotationValueTrue)
}

// BuildAutoFalseAnnotation creates an annotation map with auto=false.
func BuildAutoFalseAnnotation() map[string]string {
	return singleAnnotation(AnnotationAuto, AnnotationValueFalse)
}

// BuildConfigMapAutoAnnotation creates an annotation map with configmap auto=true.
func BuildConfigMapAutoAnnotation() map[string]string {
	return singleAnnotation(AnnotationConfigMapAuto, AnnotationValueTrue)
}

// BuildSecretAutoAnnotation creates an annotation map with secret auto=true.
func BuildSecretAutoAnnotation() map[string]string {
	return singleAnnotation(AnnotationSecretAuto, AnnotationValueTrue)
}

// BuildSearchAnnotation creates an annotation map to enable search mode.
func BuildSearchAnnotation() map[string]string {
	return singleAnnotation(AnnotationSearch, AnnotationValueTrue)
}

// BuildMatchAnnotation creates an annotation map to enable match mode.
func BuildMatchAnnotation() map[string]string {
	return singleAnnotation(AnnotationMatch, AnnotationValueTrue)
}

// BuildIgnoreAnnotation creates an annotation map to ignore a resource.
func BuildIgnoreAnnotation() map[string]string {
	return singleAnnotation(AnnotationIgnore, AnnotationValueTrue)
}

// BuildRolloutRestartStrategyAnnotation creates an annotation for Argo Rollout restart strategy.
func BuildRolloutRestartStrategyAnnotation() map[string]string {
	return singleAnnotation(AnnotationRolloutStrategy, AnnotationValueRestart)
}

// BuildConfigMapExcludeAnnotation creates an annotation to exclude ConfigMaps from auto-reload.
func BuildConfigMapExcludeAnnotation(configMapNames ...string) map[string]string {
	return singleAnnotation(AnnotationConfigMapExclude, joinNames(configMapNames))
}

// BuildSecretExcludeAnnotation creates an annotation to exclude Secrets from auto-reload.
func BuildSecretExcludeAnnotation(secretNames ...string) map[string]string {
	return singleAnnotation(AnnotationSecretExclude, joinNames(secretNames))
}

// BuildPausePeriodAnnotation creates an annotation for deployment pause period.
func BuildPausePeriodAnnotation(duration string) map[string]string {
	return singleAnnotation(AnnotationDeploymentPausePeriod, duration)
}

// joinNames joins names with a comma separator. A nil or empty slice yields
// the empty string, matching strings.Join semantics.
func joinNames(names []string) string {
	return strings.Join(names, ",")
}

View File

@@ -0,0 +1,306 @@
package utils
import (
"testing"
)
// TestBuildConfigMapReloadAnnotation checks that the builder emits a
// single-entry map keyed by the ConfigMap reload annotation, with the
// supplied names joined by commas.
func TestBuildConfigMapReloadAnnotation(t *testing.T) {
	cases := []struct {
		name       string
		configMaps []string
		want       map[string]string
	}{
		{
			name:       "single ConfigMap",
			configMaps: []string{"my-config"},
			want: map[string]string{
				AnnotationConfigMapReload: "my-config",
			},
		},
		{
			name:       "multiple ConfigMaps",
			configMaps: []string{"config1", "config2", "config3"},
			want: map[string]string{
				AnnotationConfigMapReload: "config1,config2,config3",
			},
		},
		{
			name:       "empty list",
			configMaps: []string{},
			want: map[string]string{
				AnnotationConfigMapReload: "",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := BuildConfigMapReloadAnnotation(tc.configMaps...)
			if len(got) != len(tc.want) {
				t.Errorf("BuildConfigMapReloadAnnotation() returned %d entries, want %d", len(got), len(tc.want))
			}
			for k, v := range tc.want {
				if got[k] != v {
					t.Errorf("BuildConfigMapReloadAnnotation()[%q] = %q, want %q", k, got[k], v)
				}
			}
		})
	}
}
// TestBuildSecretReloadAnnotation checks that the builder emits the Secret
// reload annotation with the supplied names joined by commas.
func TestBuildSecretReloadAnnotation(t *testing.T) {
	cases := []struct {
		name    string
		secrets []string
		want    map[string]string
	}{
		{
			name:    "single Secret",
			secrets: []string{"my-secret"},
			want: map[string]string{
				AnnotationSecretReload: "my-secret",
			},
		},
		{
			name:    "multiple Secrets",
			secrets: []string{"secret1", "secret2"},
			want: map[string]string{
				AnnotationSecretReload: "secret1,secret2",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := BuildSecretReloadAnnotation(tc.secrets...)
			for k, v := range tc.want {
				if got[k] != v {
					t.Errorf("BuildSecretReloadAnnotation()[%q] = %q, want %q", k, got[k], v)
				}
			}
		})
	}
}
// TestBuildAutoAnnotations exercises each of the auto-reload annotation
// builders and verifies the expected key/value pair is present.
func TestBuildAutoAnnotations(t *testing.T) {
	t.Run("BuildAutoTrueAnnotation", func(t *testing.T) {
		got := BuildAutoTrueAnnotation()
		if got[AnnotationAuto] != AnnotationValueTrue {
			t.Errorf("BuildAutoTrueAnnotation()[%q] = %q, want %q",
				AnnotationAuto, got[AnnotationAuto], AnnotationValueTrue)
		}
	})
	t.Run("BuildAutoFalseAnnotation", func(t *testing.T) {
		got := BuildAutoFalseAnnotation()
		if got[AnnotationAuto] != AnnotationValueFalse {
			t.Errorf("BuildAutoFalseAnnotation()[%q] = %q, want %q",
				AnnotationAuto, got[AnnotationAuto], AnnotationValueFalse)
		}
	})
	t.Run("BuildConfigMapAutoAnnotation", func(t *testing.T) {
		got := BuildConfigMapAutoAnnotation()
		if got[AnnotationConfigMapAuto] != AnnotationValueTrue {
			t.Errorf("BuildConfigMapAutoAnnotation()[%q] = %q, want %q",
				AnnotationConfigMapAuto, got[AnnotationConfigMapAuto], AnnotationValueTrue)
		}
	})
	t.Run("BuildSecretAutoAnnotation", func(t *testing.T) {
		got := BuildSecretAutoAnnotation()
		if got[AnnotationSecretAuto] != AnnotationValueTrue {
			t.Errorf("BuildSecretAutoAnnotation()[%q] = %q, want %q",
				AnnotationSecretAuto, got[AnnotationSecretAuto], AnnotationValueTrue)
		}
	})
}
// TestBuildSearchMatchAnnotations verifies the search and match annotation
// builders both set their key to "true".
func TestBuildSearchMatchAnnotations(t *testing.T) {
	t.Run("BuildSearchAnnotation", func(t *testing.T) {
		got := BuildSearchAnnotation()
		if got[AnnotationSearch] != AnnotationValueTrue {
			t.Errorf("BuildSearchAnnotation()[%q] = %q, want %q",
				AnnotationSearch, got[AnnotationSearch], AnnotationValueTrue)
		}
	})
	t.Run("BuildMatchAnnotation", func(t *testing.T) {
		got := BuildMatchAnnotation()
		if got[AnnotationMatch] != AnnotationValueTrue {
			t.Errorf("BuildMatchAnnotation()[%q] = %q, want %q",
				AnnotationMatch, got[AnnotationMatch], AnnotationValueTrue)
		}
	})
}
// TestBuildIgnoreAnnotation verifies the ignore annotation builder sets the
// ignore key to "true".
func TestBuildIgnoreAnnotation(t *testing.T) {
	got := BuildIgnoreAnnotation()
	if got[AnnotationIgnore] != AnnotationValueTrue {
		t.Errorf("BuildIgnoreAnnotation()[%q] = %q, want %q",
			AnnotationIgnore, got[AnnotationIgnore], AnnotationValueTrue)
	}
}
// TestBuildRolloutRestartStrategyAnnotation verifies the rollout-strategy
// annotation builder sets its key to "restart".
func TestBuildRolloutRestartStrategyAnnotation(t *testing.T) {
	got := BuildRolloutRestartStrategyAnnotation()
	if got[AnnotationRolloutStrategy] != AnnotationValueRestart {
		t.Errorf("BuildRolloutRestartStrategyAnnotation()[%q] = %q, want %q",
			AnnotationRolloutStrategy, got[AnnotationRolloutStrategy], AnnotationValueRestart)
	}
}
// TestBuildExcludeAnnotations verifies the ConfigMap and Secret exclusion
// builders join their variadic names with commas under the right keys.
func TestBuildExcludeAnnotations(t *testing.T) {
	t.Run("BuildConfigMapExcludeAnnotation single", func(t *testing.T) {
		got := BuildConfigMapExcludeAnnotation("excluded-cm")
		if got[AnnotationConfigMapExclude] != "excluded-cm" {
			t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q",
				AnnotationConfigMapExclude, got[AnnotationConfigMapExclude], "excluded-cm")
		}
	})
	t.Run("BuildConfigMapExcludeAnnotation multiple", func(t *testing.T) {
		got := BuildConfigMapExcludeAnnotation("cm1", "cm2", "cm3")
		want := "cm1,cm2,cm3"
		if got[AnnotationConfigMapExclude] != want {
			t.Errorf("BuildConfigMapExcludeAnnotation()[%q] = %q, want %q",
				AnnotationConfigMapExclude, got[AnnotationConfigMapExclude], want)
		}
	})
	t.Run("BuildSecretExcludeAnnotation single", func(t *testing.T) {
		got := BuildSecretExcludeAnnotation("excluded-secret")
		if got[AnnotationSecretExclude] != "excluded-secret" {
			t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q",
				AnnotationSecretExclude, got[AnnotationSecretExclude], "excluded-secret")
		}
	})
	t.Run("BuildSecretExcludeAnnotation multiple", func(t *testing.T) {
		got := BuildSecretExcludeAnnotation("s1", "s2")
		want := "s1,s2"
		if got[AnnotationSecretExclude] != want {
			t.Errorf("BuildSecretExcludeAnnotation()[%q] = %q, want %q",
				AnnotationSecretExclude, got[AnnotationSecretExclude], want)
		}
	})
}
// TestBuildPausePeriodAnnotation verifies the pause-period builder passes the
// duration string through unchanged.
func TestBuildPausePeriodAnnotation(t *testing.T) {
	cases := []struct {
		name     string
		duration string
		want     string
	}{
		{
			name:     "10 seconds",
			duration: "10s",
			want:     "10s",
		},
		{
			name:     "1 minute",
			duration: "1m",
			want:     "1m",
		},
		{
			name:     "30 minutes",
			duration: "30m",
			want:     "30m",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := BuildPausePeriodAnnotation(tc.duration)
			if got[AnnotationDeploymentPausePeriod] != tc.want {
				t.Errorf("BuildPausePeriodAnnotation(%q)[%q] = %q, want %q",
					tc.duration, AnnotationDeploymentPausePeriod,
					got[AnnotationDeploymentPausePeriod], tc.want)
			}
		})
	}
}
// TestJoinNames covers the comma-join helper for zero, one, and several
// elements.
func TestJoinNames(t *testing.T) {
	cases := []struct {
		name  string
		input []string
		want  string
	}{
		{
			name:  "empty slice",
			input: []string{},
			want:  "",
		},
		{
			name:  "single name",
			input: []string{"one"},
			want:  "one",
		},
		{
			name:  "two names",
			input: []string{"one", "two"},
			want:  "one,two",
		},
		{
			name:  "three names",
			input: []string{"alpha", "beta", "gamma"},
			want:  "alpha,beta,gamma",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := joinNames(tc.input)
			if got != tc.want {
				t.Errorf("joinNames(%v) = %q, want %q", tc.input, got, tc.want)
			}
		})
	}
}
// TestAnnotationConstants pins the annotation key constants to their exact
// string values so an accidental rename of a key is caught immediately.
func TestAnnotationConstants(t *testing.T) {
	cases := []struct {
		name string
		got  string
		want string
	}{
		{"AnnotationLastReloadedFrom", AnnotationLastReloadedFrom, "reloader.stakater.com/last-reloaded-from"},
		{"AnnotationConfigMapReload", AnnotationConfigMapReload, "configmap.reloader.stakater.com/reload"},
		{"AnnotationSecretReload", AnnotationSecretReload, "secret.reloader.stakater.com/reload"},
		{"AnnotationAuto", AnnotationAuto, "reloader.stakater.com/auto"},
		{"AnnotationConfigMapAuto", AnnotationConfigMapAuto, "configmap.reloader.stakater.com/auto"},
		{"AnnotationSecretAuto", AnnotationSecretAuto, "secret.reloader.stakater.com/auto"},
		{"AnnotationConfigMapExclude", AnnotationConfigMapExclude, "configmaps.exclude.reloader.stakater.com/reload"},
		{"AnnotationSecretExclude", AnnotationSecretExclude, "secrets.exclude.reloader.stakater.com/reload"},
		{"AnnotationSearch", AnnotationSearch, "reloader.stakater.com/search"},
		{"AnnotationMatch", AnnotationMatch, "reloader.stakater.com/match"},
		{"AnnotationIgnore", AnnotationIgnore, "reloader.stakater.com/ignore"},
		{"AnnotationDeploymentPausePeriod", AnnotationDeploymentPausePeriod, "deployment.reloader.stakater.com/pause-period"},
		{"AnnotationDeploymentPausedAt", AnnotationDeploymentPausedAt, "deployment.reloader.stakater.com/paused-at"},
		{"AnnotationRolloutStrategy", AnnotationRolloutStrategy, "reloader.stakater.com/rollout-strategy"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.got != tc.want {
				t.Errorf("%s = %q, want %q", tc.name, tc.got, tc.want)
			}
		})
	}
}
// TestAnnotationValues pins the annotation value constants to their exact
// string values.
func TestAnnotationValues(t *testing.T) {
	if got := AnnotationValueTrue; got != "true" {
		t.Errorf("AnnotationValueTrue = %q, want \"true\"", got)
	}
	if got := AnnotationValueFalse; got != "false" {
		t.Errorf("AnnotationValueFalse = %q, want \"false\"", got)
	}
	if got := AnnotationValueRestart; got != "restart" {
		t.Errorf("AnnotationValueRestart = %q, want \"restart\"", got)
	}
}

308
test/e2e/utils/argo.go Normal file
View File

@@ -0,0 +1,308 @@
package utils
import (
"context"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
)
// ArgoRolloutGVR is the GroupVersionResource that addresses Argo Rollout
// objects (argoproj.io/v1alpha1, resource "rollouts") through the dynamic
// client.
var ArgoRolloutGVR = schema.GroupVersionResource{
	Group:    "argoproj.io",
	Version:  "v1alpha1",
	Resource: "rollouts",
}
// RolloutOption is a functional option for configuring an Argo Rollout.
// Options mutate the unstructured Rollout object in place before creation.
type RolloutOption func(*unstructured.Unstructured)
// IsArgoRolloutsInstalled reports whether the Argo Rollouts CRD is served by
// the cluster, determined by attempting a one-item list in "default".
func IsArgoRolloutsInstalled(ctx context.Context, dynamicClient dynamic.Interface) bool {
	// A successful list — even an empty one — proves the CRD exists.
	opts := metav1.ListOptions{Limit: 1}
	if _, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace("default").List(ctx, opts); err != nil {
		return false
	}
	return true
}
// CreateArgoRollout creates an Argo Rollout with the given options.
//
// The base object is a single-replica busybox pod (sleeping for an hour) that
// is selected by the label app=<name>, using a one-step canary strategy that
// immediately shifts 100% of traffic. Each RolloutOption is applied in order
// to this base object before it is submitted to the API server.
func CreateArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, opts ...RolloutOption) error {
	rollout := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "argoproj.io/v1alpha1",
			"kind":       "Rollout",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": namespace,
			},
			"spec": map[string]interface{}{
				// int64 is required: unstructured objects reject plain int.
				"replicas": int64(1),
				"selector": map[string]interface{}{
					"matchLabels": map[string]interface{}{
						"app": name,
					},
				},
				"template": map[string]interface{}{
					"metadata": map[string]interface{}{
						"labels": map[string]interface{}{
							"app": name,
						},
					},
					"spec": map[string]interface{}{
						"containers": []interface{}{
							map[string]interface{}{
								"name":    "app",
								"image":   "busybox:1.36",
								"command": []interface{}{"sh", "-c", "sleep 3600"},
							},
						},
					},
				},
				// Minimal canary strategy: a single step at full weight.
				"strategy": map[string]interface{}{
					"canary": map[string]interface{}{
						"steps": []interface{}{
							map[string]interface{}{
								"setWeight": int64(100),
							},
						},
					},
				},
			},
		},
	}
	// Apply options
	for _, opt := range opts {
		opt(rollout)
	}
	_, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Create(ctx, rollout, metav1.CreateOptions{})
	return err
}
// DeleteArgoRollout removes the named Argo Rollout from the given namespace.
func DeleteArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) error {
	return dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// GetArgoRollout fetches the named Argo Rollout from the given namespace.
func GetArgoRollout(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (*unstructured.Unstructured, error) {
	client := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace)
	return client.Get(ctx, name, metav1.GetOptions{})
}
// WithRolloutConfigMapEnvFrom appends a configMapRef envFrom source to the
// Rollout's first container. A Rollout with no containers is left untouched.
func WithRolloutConfigMapEnvFrom(configMapName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		sources, _, _ := unstructured.NestedSlice(first, "envFrom")
		ref := map[string]interface{}{
			"configMapRef": map[string]interface{}{
				"name": configMapName,
			},
		}
		first["envFrom"] = append(sources, ref)
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithRolloutSecretEnvFrom appends a secretRef envFrom source to the
// Rollout's first container. A Rollout with no containers is left untouched.
func WithRolloutSecretEnvFrom(secretName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		sources, _, _ := unstructured.NestedSlice(first, "envFrom")
		ref := map[string]interface{}{
			"secretRef": map[string]interface{}{
				"name": secretName,
			},
		}
		first["envFrom"] = append(sources, ref)
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithRolloutConfigMapVolume mounts the named ConfigMap into the Rollout's
// first container at /etc/config/<name>, via a volume called <name>-volume.
func WithRolloutConfigMapVolume(configMapName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		volName := configMapName + "-volume"

		// Register the volume on the pod spec.
		volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes")
		volumes = append(volumes, map[string]interface{}{
			"name": volName,
			"configMap": map[string]interface{}{
				"name": configMapName,
			},
		})
		_ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes")

		// Mount it into the first container, if one exists.
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		mounts, _, _ := unstructured.NestedSlice(first, "volumeMounts")
		first["volumeMounts"] = append(mounts, map[string]interface{}{
			"name":      volName,
			"mountPath": "/etc/config/" + configMapName,
		})
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithRolloutSecretVolume mounts the named Secret into the Rollout's first
// container at /etc/secrets/<name>, via a volume called <name>-volume.
func WithRolloutSecretVolume(secretName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		volName := secretName + "-volume"

		// Register the volume on the pod spec.
		volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes")
		volumes = append(volumes, map[string]interface{}{
			"name": volName,
			"secret": map[string]interface{}{
				"secretName": secretName,
			},
		})
		_ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes")

		// Mount it into the first container, if one exists.
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		mounts, _, _ := unstructured.NestedSlice(first, "volumeMounts")
		first["volumeMounts"] = append(mounts, map[string]interface{}{
			"name":      volName,
			"mountPath": "/etc/secrets/" + secretName,
		})
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithRolloutAnnotations sets the given annotations on the Rollout's pod
// template metadata (spec.template.metadata.annotations), replacing any that
// were previously set there.
func WithRolloutAnnotations(annotations map[string]string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		converted := make(map[string]interface{}, len(annotations))
		for key, val := range annotations {
			converted[key] = val
		}
		_ = unstructured.SetNestedMap(rollout.Object, converted, "spec", "template", "metadata", "annotations")
	}
}
// WithRolloutObjectAnnotations sets the given annotations on the Rollout's
// top-level metadata. Use this for annotations Reloader reads from the
// Rollout object itself (e.g. rollout-strategy) rather than its pod template.
func WithRolloutObjectAnnotations(annotations map[string]string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		converted := make(map[string]interface{}, len(annotations))
		for key, val := range annotations {
			converted[key] = val
		}
		_ = unstructured.SetNestedMap(rollout.Object, converted, "metadata", "annotations")
	}
}
// WaitForRolloutReady waits for an Argo Rollout to be ready.
//
// Readiness is either status.phase == "Healthy", or spec.replicas matching
// status.availableReplicas (for controllers that have not yet reported a
// phase). Transient Get errors and a missing status are tolerated — polling
// simply continues until timeout, in which case the context's deadline error
// is returned.
func WaitForRolloutReady(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // Keep polling
		}
		// Check status.phase == "Healthy" or replicas == availableReplicas
		status, found, _ := unstructured.NestedMap(rollout.Object, "status")
		if !found {
			return false, nil
		}
		phase, _, _ := unstructured.NestedString(status, "phase")
		if phase == "Healthy" {
			return true, nil
		}
		// Alternative: check replicas
		replicas, _, _ := unstructured.NestedInt64(rollout.Object, "spec", "replicas")
		availableReplicas, _, _ := unstructured.NestedInt64(status, "availableReplicas")
		if replicas > 0 && replicas == availableReplicas {
			return true, nil
		}
		return false, nil
	})
}
// WaitForRolloutReloaded waits for an Argo Rollout's pod template to have the reloader annotation.
//
// It returns (true, nil) once annotationKey appears under
// spec.template.metadata.annotations, and (false, nil) if the timeout elapses
// without it. Transient Get errors are swallowed and polling continues.
func WaitForRolloutReloaded(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		// Check pod template annotations
		annotations, _, _ := unstructured.NestedStringMap(rollout.Object, "spec", "template", "metadata", "annotations")
		if annotations != nil {
			if _, ok := annotations[annotationKey]; ok {
				found = true
				return true, nil
			}
		}
		return false, nil
	})
	// Timeout is an expected outcome (found stays false), not an error.
	// NOTE(review): prefer errors.Is(err, context.DeadlineExceeded) or
	// wait.Interrupted(err) over a direct comparison — a wrapped deadline
	// error would slip past this check and be returned as a failure.
	if err != nil && err != context.DeadlineExceeded {
		return false, err
	}
	return found, nil
}
// GetRolloutPodTemplateAnnotations returns the annotations on the Rollout's
// pod template (spec.template.metadata.annotations); the map may be nil when
// none are set.
func GetRolloutPodTemplateAnnotations(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (map[string]string, error) {
	rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	anns, _, _ := unstructured.NestedStringMap(rollout.Object, "spec", "template", "metadata", "annotations")
	return anns, nil
}
// WaitForRolloutRestartAt waits for an Argo Rollout's spec.restartAt field to be set.
// This is used when the restart strategy is specified.
//
// It returns (true, nil) once spec.restartAt holds a non-empty value, and
// (false, nil) if the timeout elapses first. Transient Get errors are
// swallowed and polling continues.
func WaitForRolloutRestartAt(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		// Check if spec.restartAt is set
		restartAt, exists, _ := unstructured.NestedString(rollout.Object, "spec", "restartAt")
		if exists && restartAt != "" {
			found = true
			return true, nil
		}
		return false, nil
	})
	// Timeout is an expected outcome (found stays false), not an error.
	// NOTE(review): prefer errors.Is(err, context.DeadlineExceeded) or
	// wait.Interrupted(err) over a direct comparison — a wrapped deadline
	// error would slip past this check.
	if err != nil && err != context.DeadlineExceeded {
		return false, err
	}
	return found, nil
}

224
test/e2e/utils/helm.go Normal file
View File

@@ -0,0 +1,224 @@
package utils
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// Helm-related constants.
const (
	// DefaultTestImage is the default image to test if RELOADER_IMAGE is not set.
	DefaultTestImage = "ghcr.io/stakater/reloader:test"
	// DefaultHelmReleaseName is the Helm release name for Reloader.
	DefaultHelmReleaseName = "reloader"
	// DefaultHelmChartPath is the path to the Helm chart relative to project root.
	DefaultHelmChartPath = "deployments/kubernetes/chart/reloader"
	// StakaterEnvVarPrefix is the prefix for Stakater environment variables.
	// NOTE(review): not referenced by any code visible in this file — confirm
	// it is used elsewhere before removing.
	StakaterEnvVarPrefix = "STAKATER_"
)
// DeployOptions configures how Reloader is deployed via Helm.
// The zero value is usable except for Namespace, which callers must set;
// every other field falls back to a sensible default in DeployReloader.
type DeployOptions struct {
	// Namespace to deploy Reloader into.
	Namespace string
	// Image is the full image reference (e.g., "ghcr.io/stakater/reloader:test").
	// Defaults to GetTestImage() when empty.
	Image string
	// Values are additional Helm values to set (key=value pairs), each passed
	// as an extra --set flag.
	Values map[string]string
	// ReleaseName is the Helm release name. Defaults to DefaultHelmReleaseName.
	ReleaseName string
	// Timeout for Helm operations. Defaults to "120s".
	Timeout string
}
// DeployReloader deploys Reloader using Helm with the specified options.
//
// It fills in defaults for ReleaseName, Timeout, and Image, removes any
// leftover cluster-scoped resources from a previous run, and then performs a
// `helm upgrade --install` against the in-repo chart, waiting for the release
// to become ready. Returns an error including helm's output on failure.
func DeployReloader(opts DeployOptions) error {
	projectDir, err := GetProjectDir()
	if err != nil {
		return fmt.Errorf("getting project dir: %w", err)
	}
	if opts.ReleaseName == "" {
		opts.ReleaseName = DefaultHelmReleaseName
	}
	if opts.Timeout == "" {
		opts.Timeout = "120s"
	}
	if opts.Image == "" {
		opts.Image = GetTestImage()
	}
	// Clean up any existing cluster-scoped resources before deploying
	// This prevents "already exists" errors when a previous test didn't clean up properly
	cleanupClusterResources(opts.ReleaseName)
	chartPath := filepath.Join(projectDir, DefaultHelmChartPath)
	args := []string{
		"upgrade", "--install", opts.ReleaseName,
		chartPath,
		"--namespace", opts.Namespace,
		"--create-namespace",
		"--reset-values", // Important: reset values to ensure clean state between tests
		"--set", fmt.Sprintf("image.repository=%s", GetImageRepository(opts.Image)),
		"--set", fmt.Sprintf("image.tag=%s", GetImageTag(opts.Image)),
		"--set", "image.pullPolicy=IfNotPresent",
		"--wait",
		"--timeout", opts.Timeout,
	}
	// Add custom values
	// NOTE(review): map iteration order is random, so the --set flag order
	// varies between runs; harmless for distinct keys.
	for key, value := range opts.Values {
		args = append(args, "--set", fmt.Sprintf("%s=%s", key, value))
	}
	cmd := exec.Command("helm", args...)
	output, err := Run(cmd)
	if err != nil {
		return fmt.Errorf("helm install failed: %s: %w", output, err)
	}
	return nil
}
// UndeployReloader removes the Reloader Helm release and cleans up cluster-scoped resources.
// This function waits for all resources to be fully deleted to prevent race conditions
// between test suites.
//
// An empty releaseName falls back to DefaultHelmReleaseName. Failures while
// deleting the ClusterRole/ClusterRoleBinding are deliberately ignored (the
// resources may not exist); only a failed `helm uninstall` is returned as an
// error.
func UndeployReloader(namespace, releaseName string) error {
	if releaseName == "" {
		releaseName = DefaultHelmReleaseName
	}
	// Use --wait to ensure Helm waits for resources to be deleted
	cmd := exec.Command("helm", "uninstall", releaseName, "--namespace", namespace, "--ignore-not-found", "--wait")
	output, err := Run(cmd)
	if err != nil {
		return fmt.Errorf("helm uninstall failed: %s: %w", output, err)
	}
	// Clean up cluster-scoped resources that Helm might not delete
	// Use --wait to ensure resources are fully deleted before returning
	clusterResources := []struct {
		kind string
		name string
	}{
		{"clusterrole", releaseName + "-reloader-role"},
		{"clusterrolebinding", releaseName + "-reloader-role-binding"},
	}
	for _, res := range clusterResources {
		cmd := exec.Command("kubectl", "delete", res.kind, res.name, "--ignore-not-found", "--wait=true")
		_, _ = Run(cmd) // Ignore errors - resource may not exist
	}
	// Additional wait to ensure controller is fully stopped and resources are cleaned up
	// This prevents race conditions when the next test tries to deploy immediately
	waitForReloaderGone(namespace, releaseName)
	return nil
}
// waitForReloaderGone polls kubectl until the Reloader deployment no longer
// exists, giving up silently after roughly 30 seconds.
func waitForReloaderGone(namespace, releaseName string) {
	deploymentName := ReloaderDeploymentName(releaseName)
	for attempt := 0; attempt < 30; attempt++ {
		out, _ := Run(exec.Command(
			"kubectl", "get", "deployment", deploymentName,
			"-n", namespace, "--ignore-not-found", "-o", "name",
		))
		// An empty result means the deployment is gone.
		if strings.TrimSpace(out) == "" {
			return
		}
		time.Sleep(1 * time.Second)
	}
}
// cleanupClusterResources best-effort deletes the cluster-scoped ClusterRole
// and ClusterRoleBinding a previous test run may have left behind, so a fresh
// deploy does not hit "already exists" errors.
func cleanupClusterResources(releaseName string) {
	if releaseName == "" {
		releaseName = DefaultHelmReleaseName
	}
	targets := [][2]string{
		{"clusterrole", releaseName + "-reloader-role"},
		{"clusterrolebinding", releaseName + "-reloader-role-binding"},
	}
	for _, target := range targets {
		// Errors are ignored on purpose - the resource may not exist.
		_, _ = Run(exec.Command("kubectl", "delete", target[0], target[1], "--ignore-not-found", "--wait=true"))
	}
	// Small wait to ensure API server has processed the deletions.
	time.Sleep(500 * time.Millisecond)
}
// GetTestImage returns the image under test: the RELOADER_IMAGE environment
// variable when set, otherwise DefaultTestImage.
func GetTestImage() string {
	img := os.Getenv("RELOADER_IMAGE")
	if img == "" {
		return DefaultTestImage
	}
	return img
}
// GetImageRepository extracts the repository (without tag) from a full image reference.
// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "ghcr.io/stakater/reloader"
//
// Only a colon after the final "/" is treated as the tag separator, so
// registry ports ("localhost:5000/img") are preserved. Digest references
// ("img@sha256:...") are not fully supported: the digest's colon is treated
// as a tag separator.
func GetImageRepository(image string) string {
	// The tag colon, if any, is the last colon and must come after the last
	// slash (LastIndex returns -1 when the character is absent, which the
	// comparison handles naturally).
	if i := strings.LastIndex(image, ":"); i > strings.LastIndex(image, "/") {
		return image[:i]
	}
	return image
}
// GetImageTag extracts the tag from a full image reference.
// Example: "ghcr.io/stakater/reloader:v1.0.0" -> "v1.0.0"
// Returns "latest" if no tag is found.
//
// Only a colon after the final "/" counts as a tag separator, so a registry
// port ("localhost:5000/img") does not produce a spurious tag.
func GetImageTag(image string) string {
	// LastIndex returns -1 when the character is absent, which the
	// comparison handles naturally.
	if i := strings.LastIndex(image, ":"); i > strings.LastIndex(image, "/") {
		return image[i+1:]
	}
	return "latest"
}
// ReloaderDeploymentName returns the chart's Deployment name for the given
// Helm release ("<release>-reloader"); an empty release falls back to the
// default release name.
func ReloaderDeploymentName(releaseName string) string {
	name := releaseName
	if name == "" {
		name = DefaultHelmReleaseName
	}
	return name + "-reloader"
}
// ReloaderPodSelector returns the label selector that matches Reloader pods
// for the given Helm release ("app=<release>-reloader"); an empty release
// falls back to the default release name.
func ReloaderPodSelector(releaseName string) string {
	name := releaseName
	if name == "" {
		name = DefaultHelmReleaseName
	}
	return "app=" + name + "-reloader"
}

157
test/e2e/utils/helm_test.go Normal file
View File

@@ -0,0 +1,157 @@
package utils
import (
"testing"
)
func TestGetImageRepository(t *testing.T) {
tests := []struct {
name string
image string
expected string
}{
{
name: "full image with tag",
image: "ghcr.io/stakater/reloader:v1.0.0",
expected: "ghcr.io/stakater/reloader",
},
{
name: "image with latest tag",
image: "nginx:latest",
expected: "nginx",
},
{
name: "image without tag",
image: "ghcr.io/stakater/reloader",
expected: "ghcr.io/stakater/reloader",
},
{
name: "image with digest (not fully supported)",
image: "nginx@sha256:abc123",
expected: "nginx@sha256", // Note: digest handling is limited
},
{
name: "simple image name",
image: "nginx",
expected: "nginx",
},
{
name: "image with port in registry",
image: "localhost:5000/myimage:v1",
expected: "localhost:5000/myimage",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := GetImageRepository(tt.image)
if result != tt.expected {
t.Errorf("GetImageRepository(%q) = %q, want %q", tt.image, result, tt.expected)
}
})
}
}
func TestGetImageTag(t *testing.T) {
tests := []struct {
name string
image string
expected string
}{
{
name: "full image with tag",
image: "ghcr.io/stakater/reloader:v1.0.0",
expected: "v1.0.0",
},
{
name: "image with latest tag",
image: "nginx:latest",
expected: "latest",
},
{
name: "image without tag",
image: "ghcr.io/stakater/reloader",
expected: "latest",
},
{
name: "simple image name",
image: "nginx",
expected: "latest",
},
{
name: "image with port in registry",
image: "localhost:5000/myimage:v1",
expected: "v1",
},
{
name: "tag with sha",
image: "myimage:sha-abc123",
expected: "sha-abc123",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := GetImageTag(tt.image)
if result != tt.expected {
t.Errorf("GetImageTag(%q) = %q, want %q", tt.image, result, tt.expected)
}
})
}
}
func TestReloaderDeploymentName(t *testing.T) {
tests := []struct {
name string
releaseName string
expected string
}{
{
name: "default release name",
releaseName: "",
expected: "reloader-reloader",
},
{
name: "custom release name",
releaseName: "my-reloader",
expected: "my-reloader-reloader",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := ReloaderDeploymentName(tt.releaseName)
if result != tt.expected {
t.Errorf("ReloaderDeploymentName(%q) = %q, want %q", tt.releaseName, result, tt.expected)
}
})
}
}
func TestReloaderPodSelector(t *testing.T) {
tests := []struct {
name string
releaseName string
expected string
}{
{
name: "default release name",
releaseName: "",
expected: "app=reloader-reloader",
},
{
name: "custom release name",
releaseName: "my-reloader",
expected: "app=my-reloader-reloader",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := ReloaderPodSelector(tt.releaseName)
if result != tt.expected {
t.Errorf("ReloaderPodSelector(%q) = %q, want %q", tt.releaseName, result, tt.expected)
}
})
}
}

27
test/e2e/utils/kind.go Normal file
View File

@@ -0,0 +1,27 @@
package utils
import (
"fmt"
"os"
"os/exec"
)
// GetKindClusterName returns the Kind cluster name: the KIND_CLUSTER
// environment variable when set, otherwise "kind".
func GetKindClusterName() string {
	name := os.Getenv("KIND_CLUSTER")
	if name == "" {
		return "kind"
	}
	return name
}
// LoadImageToKindCluster loads a Docker image into the Kind cluster named by
// GetKindClusterName, wrapping any failure with the command output.
func LoadImageToKindCluster(image string) error {
	args := []string{"load", "docker-image", image, "--name", GetKindClusterName()}
	output, err := Run(exec.Command("kind", args...))
	if err != nil {
		return fmt.Errorf("failed to load image %s to Kind cluster: %w\nOutput: %s",
			image, err, output)
	}
	return nil
}

265
test/e2e/utils/openshift.go Normal file
View File

@@ -0,0 +1,265 @@
package utils
import (
"context"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
)
// DeploymentConfigGVR is the GroupVersionResource that addresses OpenShift
// DeploymentConfig objects (apps.openshift.io/v1) through the dynamic client.
var DeploymentConfigGVR = schema.GroupVersionResource{
	Group:    "apps.openshift.io",
	Version:  "v1",
	Resource: "deploymentconfigs",
}
// DCOption is a functional option for configuring a DeploymentConfig.
// Options mutate the unstructured DeploymentConfig in place before creation.
type DCOption func(*unstructured.Unstructured)
// HasDeploymentConfigSupport reports whether the cluster serves the OpenShift
// DeploymentConfig API, by scanning discovery for a resource of that kind.
// Any discovery error is treated as "not supported".
func HasDeploymentConfigSupport(discoveryClient discovery.DiscoveryInterface) bool {
	_, groups, err := discoveryClient.ServerGroupsAndResources()
	if err != nil {
		return false
	}
	for _, group := range groups {
		for _, res := range group.APIResources {
			if res.Kind == "DeploymentConfig" {
				return true
			}
		}
	}
	return false
}
// CreateDeploymentConfig creates an OpenShift DeploymentConfig with the given options.
//
// The base object runs a single busybox replica (sleeping for an hour)
// selected by app=<name>, with a ConfigChange trigger. Each DCOption is
// applied in order to this base object before it is submitted.
func CreateDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, opts ...DCOption) error {
	dc := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "apps.openshift.io/v1",
			"kind":       "DeploymentConfig",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": namespace,
			},
			"spec": map[string]interface{}{
				// int64 is required: unstructured objects reject plain int.
				"replicas": int64(1),
				// DeploymentConfig selectors are plain maps, not matchLabels.
				"selector": map[string]interface{}{
					"app": name,
				},
				"template": map[string]interface{}{
					"metadata": map[string]interface{}{
						"labels": map[string]interface{}{
							"app": name,
						},
					},
					"spec": map[string]interface{}{
						"containers": []interface{}{
							map[string]interface{}{
								"name":    "app",
								"image":   "busybox:1.36",
								"command": []interface{}{"sh", "-c", "sleep 3600"},
							},
						},
					},
				},
				"triggers": []interface{}{
					map[string]interface{}{
						"type": "ConfigChange",
					},
				},
			},
		},
	}
	// Apply options
	for _, opt := range opts {
		opt(dc)
	}
	_, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Create(ctx, dc, metav1.CreateOptions{})
	return err
}
// DeleteDeploymentConfig deletes the named DeploymentConfig from the given
// namespace.
func DeleteDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) error {
	return dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// GetDeploymentConfig retrieves the named DeploymentConfig from the given
// namespace as an unstructured object.
func GetDeploymentConfig(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (*unstructured.Unstructured, error) {
	return dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
}
// WithDCConfigMapEnvFrom returns an option that appends a configMapRef entry
// for configMapName to the first container's envFrom list. It is a no-op when
// the pod template has no containers.
func WithDCConfigMapEnvFrom(configMapName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		sources, _, _ := unstructured.NestedSlice(first, "envFrom")
		ref := map[string]interface{}{
			"configMapRef": map[string]interface{}{
				"name": configMapName,
			},
		}
		first["envFrom"] = append(sources, ref)
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCSecretEnvFrom returns an option that appends a secretRef entry for
// secretName to the first container's envFrom list. It is a no-op when the
// pod template has no containers.
func WithDCSecretEnvFrom(secretName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		sources, _, _ := unstructured.NestedSlice(first, "envFrom")
		ref := map[string]interface{}{
			"secretRef": map[string]interface{}{
				"name": secretName,
			},
		}
		first["envFrom"] = append(sources, ref)
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCConfigMapVolume returns an option that adds a volume named
// "<configMapName>-volume" backed by the ConfigMap and mounts it into the
// first container at /etc/config/<configMapName>. The volume is always
// registered; the mount is skipped when there are no containers.
func WithDCConfigMapVolume(configMapName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		volumeName := configMapName + "-volume"
		// Register the volume on the pod spec.
		volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes")
		volumes = append(volumes, map[string]interface{}{
			"name": volumeName,
			"configMap": map[string]interface{}{
				"name": configMapName,
			},
		})
		_ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes")
		// Mount it into the first container, if one exists.
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		mounts, _, _ := unstructured.NestedSlice(first, "volumeMounts")
		first["volumeMounts"] = append(mounts, map[string]interface{}{
			"name":      volumeName,
			"mountPath": "/etc/config/" + configMapName,
		})
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCSecretVolume returns an option that adds a volume named
// "<secretName>-volume" backed by the Secret and mounts it into the first
// container at /etc/secrets/<secretName>. The volume is always registered;
// the mount is skipped when there are no containers.
func WithDCSecretVolume(secretName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		volumeName := secretName + "-volume"
		// Register the volume on the pod spec.
		volumes, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes")
		volumes = append(volumes, map[string]interface{}{
			"name": volumeName,
			"secret": map[string]interface{}{
				"secretName": secretName,
			},
		})
		_ = unstructured.SetNestedSlice(dc.Object, volumes, "spec", "template", "spec", "volumes")
		// Mount it into the first container, if one exists.
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		mounts, _, _ := unstructured.NestedSlice(first, "volumeMounts")
		first["volumeMounts"] = append(mounts, map[string]interface{}{
			"name":      volumeName,
			"mountPath": "/etc/secrets/" + secretName,
		})
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCAnnotations returns an option that sets the given annotations on the
// DeploymentConfig's pod template metadata, replacing any template
// annotations set earlier.
func WithDCAnnotations(annotations map[string]string) DCOption {
	return func(dc *unstructured.Unstructured) {
		// SetNestedMap requires map[string]interface{} values, so convert.
		converted := make(map[string]interface{}, len(annotations))
		for key, value := range annotations {
			converted[key] = value
		}
		_ = unstructured.SetNestedMap(dc.Object, converted, "spec", "template", "metadata", "annotations")
	}
}
// WaitForDeploymentConfigReady polls until the DeploymentConfig reports as
// many ready replicas as desired (and at least one), or until timeout.
// Transient Get errors are swallowed so polling continues; on timeout the
// underlying poll error (e.g. context.DeadlineExceeded) is returned.
func WaitForDeploymentConfigReady(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // Keep polling
		}
		// Check replicas == readyReplicas; missing fields read as zero, which
		// fails the replicas > 0 guard and keeps polling.
		replicas, _, _ := unstructured.NestedInt64(dc.Object, "spec", "replicas")
		readyReplicas, _, _ := unstructured.NestedInt64(dc.Object, "status", "readyReplicas")
		if replicas > 0 && replicas == readyReplicas {
			return true, nil
		}
		return false, nil
	})
}
// WaitForDeploymentConfigReloaded waits for the DeploymentConfig's pod
// template to gain the given annotation key (Reloader stamps the template to
// trigger a rollout). It returns (true, nil) once the annotation appears,
// (false, nil) when the wait is cut short by timeout or cancellation, and
// (false, err) only for unexpected polling failures.
func WaitForDeploymentConfigReloaded(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil map is safe, so no explicit nil check is needed.
		annotations, _, _ := unstructured.NestedStringMap(dc.Object, "spec", "template", "metadata", "annotations")
		if _, ok := annotations[annotationKey]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle and would
	// mis-report those cases as hard failures.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// GetDeploymentConfigPodTemplateAnnotations retrieves the pod template
// annotations from a DeploymentConfig. It returns a nil map (without error)
// when the template carries no annotations.
func GetDeploymentConfigPodTemplateAnnotations(ctx context.Context, dynamicClient dynamic.Interface, namespace, name string) (map[string]string, error) {
	dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	annotations, _, _ := unstructured.NestedStringMap(dc.Object, "spec", "template", "metadata", "annotations")
	return annotations, nil
}

26
test/e2e/utils/rand.go Normal file
View File

@@ -0,0 +1,26 @@
package utils
import (
"math/rand"
"time"
)
// letters is the alphabet for random suffixes; lowercase-only so generated
// strings are usable inside Kubernetes resource names.
const letters = "abcdefghijklmnopqrstuvwxyz"

// randSource is seeded once at startup; tests only need uniqueness, not
// cryptographic randomness.
var randSource = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec

// RandSeq generates a random lowercase string of length n.
// This is useful for creating unique resource names in tests.
func RandSeq(n int) string {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = letters[randSource.Intn(len(letters))]
	}
	return string(out)
}

// RandName generates a unique name of the form "<prefix>-xxxxx", where the
// suffix is five random lowercase letters.
func RandName(prefix string) string {
	suffix := RandSeq(5)
	return prefix + "-" + suffix
}

135
test/e2e/utils/rand_test.go Normal file
View File

@@ -0,0 +1,135 @@
package utils
import (
"regexp"
"testing"
)
// TestRandSeq checks that RandSeq returns strings of exactly the requested
// length containing only lowercase ASCII letters, across a range of lengths
// including the zero-length edge case.
func TestRandSeq(t *testing.T) {
	tests := []struct {
		name   string
		length int
	}{
		{"length 0", 0},
		{"length 1", 1},
		{"length 5", 5},
		{"length 10", 10},
		{"length 100", 100},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := RandSeq(tt.length)
			// Verify length
			if len(result) != tt.length {
				t.Errorf("RandSeq(%d) returned string of length %d, want %d",
					tt.length, len(result), tt.length)
			}
			// Verify only lowercase letters (the regexp needs a non-empty string,
			// so skip the charset check for length 0)
			if tt.length > 0 {
				matched, _ := regexp.MatchString("^[a-z]+$", result)
				if !matched {
					t.Errorf("RandSeq(%d) = %q, contains non-lowercase letters", tt.length, result)
				}
			}
		})
	}
}
// TestRandSeqRandomness draws several long sequences and asserts they are all
// distinct; with 20 characters (26^20 possibilities) a collision would almost
// certainly indicate a broken generator rather than bad luck.
func TestRandSeqRandomness(t *testing.T) {
	// Generate multiple sequences and verify they're different
	// (with very high probability)
	const iterations = 10
	const length = 20
	seen := make(map[string]bool)
	for i := 0; i < iterations; i++ {
		s := RandSeq(length)
		if seen[s] {
			// This is extremely unlikely with 20 chars (26^20 possibilities)
			t.Errorf("RandSeq generated duplicate: %q", s)
		}
		seen[s] = true
	}
	// Verify we got 10 unique strings
	if len(seen) != iterations {
		t.Errorf("Expected %d unique strings, got %d", iterations, len(seen))
	}
}
// TestRandName checks the "<prefix>-xxxxx" format contract: the result starts
// with the prefix plus a dash and ends with exactly five random lowercase
// letters, for a variety of prefixes including the empty string.
func TestRandName(t *testing.T) {
	tests := []struct {
		name   string
		prefix string
	}{
		{"deploy prefix", "deploy"},
		{"cm prefix", "cm"},
		{"secret prefix", "secret"},
		{"test-app prefix", "test-app"},
		{"empty prefix", ""},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := RandName(tt.prefix)
			// Verify format: prefix-xxxxx
			expectedPrefix := tt.prefix + "-"
			if len(result) <= len(expectedPrefix) {
				t.Errorf("RandName(%q) = %q, too short", tt.prefix, result)
				return
			}
			// Check prefix
			if result[:len(expectedPrefix)] != expectedPrefix {
				t.Errorf("RandName(%q) = %q, doesn't start with %q",
					tt.prefix, result, expectedPrefix)
			}
			// Check random suffix is 5 lowercase letters
			suffix := result[len(expectedPrefix):]
			if len(suffix) != 5 {
				t.Errorf("RandName(%q) suffix length = %d, want 5", tt.prefix, len(suffix))
			}
			matched, _ := regexp.MatchString("^[a-z]{5}$", suffix)
			if !matched {
				t.Errorf("RandName(%q) suffix = %q, should be 5 lowercase letters",
					tt.prefix, suffix)
			}
		})
	}
}
// TestRandNameUniqueness draws many names with one prefix and asserts no
// duplicates; collisions in a 26^5 space over 100 draws are vanishingly rare.
func TestRandNameUniqueness(t *testing.T) {
	const prefix = "test"
	const iterations = 100
	seen := make(map[string]bool, iterations)
	for i := 0; i < iterations; i++ {
		name := RandName(prefix)
		if seen[name] {
			t.Errorf("RandName generated duplicate: %q", name)
		}
		seen[name] = true
	}
}
// TestRandNameKubernetesCompatibility asserts that generated names satisfy
// the Kubernetes resource-name grammar [a-z0-9]([-a-z0-9]*[a-z0-9])?.
func TestRandNameKubernetesCompatibility(t *testing.T) {
	k8sNamePattern := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)
	for _, prefix := range []string{"deploy", "cm", "secret", "test-app", "my-resource"} {
		name := RandName(prefix)
		if !k8sNamePattern.MatchString(name) {
			t.Errorf("RandName(%q) = %q is not a valid Kubernetes name", prefix, name)
		}
	}
}

1094
test/e2e/utils/resources.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,12 @@
package utils
// MergeAnnotations combines the given annotation maps into one new map.
// Later maps win on duplicate keys; nil maps are skipped. The inputs are
// never modified and the (always non-nil) result does not alias any of them.
func MergeAnnotations(maps ...map[string]string) map[string]string {
	merged := map[string]string{}
	for _, annotations := range maps {
		for key, value := range annotations {
			merged[key] = value
		}
	}
	return merged
}

View File

@@ -0,0 +1,148 @@
package utils
import (
"testing"
)
// TestMergeAnnotations exercises MergeAnnotations across the empty, single,
// overlapping, empty-member and nil-member cases, plus two realistic
// combinations built from the package's annotation builders. Last-map-wins
// semantics are pinned by the "shared" key cases.
func TestMergeAnnotations(t *testing.T) {
	tests := []struct {
		name     string
		maps     []map[string]string
		expected map[string]string
	}{
		{
			name:     "no maps",
			maps:     []map[string]string{},
			expected: map[string]string{},
		},
		{
			name: "single map",
			maps: []map[string]string{
				{"key1": "value1"},
			},
			expected: map[string]string{
				"key1": "value1",
			},
		},
		{
			name: "two maps no overlap",
			maps: []map[string]string{
				{"key1": "value1"},
				{"key2": "value2"},
			},
			expected: map[string]string{
				"key1": "value1",
				"key2": "value2",
			},
		},
		{
			name: "three maps with overlap - last wins",
			maps: []map[string]string{
				{"key1": "value1", "shared": "first"},
				{"key2": "value2", "shared": "second"},
				{"key3": "value3", "shared": "third"},
			},
			expected: map[string]string{
				"key1":   "value1",
				"key2":   "value2",
				"key3":   "value3",
				"shared": "third", // Last map wins
			},
		},
		{
			name: "empty map in the middle",
			maps: []map[string]string{
				{"key1": "value1"},
				{},
				{"key2": "value2"},
			},
			expected: map[string]string{
				"key1": "value1",
				"key2": "value2",
			},
		},
		{
			name: "nil map in the middle",
			maps: []map[string]string{
				{"key1": "value1"},
				nil,
				{"key2": "value2"},
			},
			expected: map[string]string{
				"key1": "value1",
				"key2": "value2",
			},
		},
		{
			name: "realistic use case - auto annotation with reload annotation",
			maps: []map[string]string{
				BuildAutoTrueAnnotation(),
				BuildConfigMapReloadAnnotation("my-config"),
			},
			expected: map[string]string{
				AnnotationAuto:            AnnotationValueTrue,
				AnnotationConfigMapReload: "my-config",
			},
		},
		{
			name: "realistic use case - pause period with reload annotation",
			maps: []map[string]string{
				BuildConfigMapReloadAnnotation("config1"),
				BuildPausePeriodAnnotation("10s"),
			},
			expected: map[string]string{
				AnnotationConfigMapReload:       "config1",
				AnnotationDeploymentPausePeriod: "10s",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := MergeAnnotations(tt.maps...)
			// Compare size first so extra keys are caught, then each expected entry.
			if len(result) != len(tt.expected) {
				t.Errorf("MergeAnnotations() returned %d entries, want %d", len(result), len(tt.expected))
				t.Errorf("Got: %v", result)
				t.Errorf("Want: %v", tt.expected)
				return
			}
			for k, v := range tt.expected {
				if result[k] != v {
					t.Errorf("MergeAnnotations()[%q] = %q, want %q", k, result[k], v)
				}
			}
		})
	}
}
// TestMergeAnnotationsDoesNotModifyInput asserts that merging leaves the
// input maps untouched.
func TestMergeAnnotationsDoesNotModifyInput(t *testing.T) {
	first := map[string]string{"key1": "value1"}
	second := map[string]string{"key2": "value2"}
	_ = MergeAnnotations(first, second)
	// Both originals must still hold exactly their initial entry.
	if len(first) != 1 || first["key1"] != "value1" {
		t.Errorf("map1 was modified: %v", first)
	}
	if len(second) != 1 || second["key2"] != "value2" {
		t.Errorf("map2 was modified: %v", second)
	}
}
// TestMergeAnnotationsReturnsNewMap asserts the result is a fresh map rather
// than an alias of an input: mutating the result must not leak into the input.
func TestMergeAnnotationsReturnsNewMap(t *testing.T) {
	input := map[string]string{"key1": "value1"}
	result := MergeAnnotations(input)
	result["key2"] = "value2"
	if _, exists := input["key2"]; exists {
		t.Error("modifying result affected input map - should return a new map")
	}
}

154
test/e2e/utils/testenv.go Normal file
View File

@@ -0,0 +1,154 @@
package utils
import (
"context"
"fmt"
. "github.com/onsi/ginkgo/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// TestEnvironment holds the common test environment state for one e2e suite:
// Kubernetes clients, the suite's dedicated namespace and Helm release name,
// and file-system context for invoking project tooling.
type TestEnvironment struct {
	Ctx             context.Context    // base context for all API calls in the suite
	Cancel          context.CancelFunc // cancels Ctx; not populated by SetupTestEnvironment — set by callers that own the context
	KubeClient      kubernetes.Interface
	DynamicClient   dynamic.Interface
	DiscoveryClient discovery.DiscoveryInterface
	Namespace       string // unique per-suite namespace created by SetupTestEnvironment
	ReleaseName     string // Unique Helm release name to prevent cluster-scoped resource conflicts
	TestImage       string // Reloader image under test (from GetTestImage)
	ProjectDir      string // project root directory (from GetProjectDir)
}
// SetupTestEnvironment creates a new test environment with kubernetes clients.
// It builds typed, dynamic and discovery clients from the local kubeconfig,
// verifies cluster connectivity, and creates a uniquely named namespace with
// the given prefix plus a unique Helm release name. Callers are responsible
// for invoking Cleanup when the suite finishes. On any error the partially
// built environment is discarded and the error returned wrapped with context.
func SetupTestEnvironment(ctx context.Context, namespacePrefix string) (*TestEnvironment, error) {
	env := &TestEnvironment{
		Ctx:       ctx,
		TestImage: GetTestImage(),
	}
	var err error
	// Get project directory
	env.ProjectDir, err = GetProjectDir()
	if err != nil {
		return nil, fmt.Errorf("getting project directory: %w", err)
	}
	// Setup Kubernetes client
	kubeconfig := GetKubeconfig()
	GinkgoWriter.Printf("Using kubeconfig: %s\n", kubeconfig)
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, fmt.Errorf("building config from kubeconfig: %w", err)
	}
	env.KubeClient, err = kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("creating kubernetes client: %w", err)
	}
	env.DynamicClient, err = dynamic.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("creating dynamic client: %w", err)
	}
	env.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("creating discovery client: %w", err)
	}
	// Verify cluster connectivity with a cheap, read-only request before
	// creating any resources.
	GinkgoWriter.Println("Verifying cluster connectivity...")
	_, err = env.KubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{Limit: 1})
	if err != nil {
		return nil, fmt.Errorf("connecting to kubernetes cluster: %w", err)
	}
	GinkgoWriter.Println("Cluster connectivity verified")
	// Create test namespace with random suffix
	env.Namespace = RandName(namespacePrefix)
	// Use a unique release name to prevent cluster-scoped resource conflicts between test suites
	env.ReleaseName = RandName("reloader")
	GinkgoWriter.Printf("Creating test namespace: %s\n", env.Namespace)
	GinkgoWriter.Printf("Using Helm release name: %s\n", env.ReleaseName)
	if err := CreateNamespace(ctx, env.KubeClient, env.Namespace); err != nil {
		return nil, fmt.Errorf("creating test namespace: %w", err)
	}
	GinkgoWriter.Printf("Using test image: %s\n", env.TestImage)
	GinkgoWriter.Printf("Project directory: %s\n", env.ProjectDir)
	return env, nil
}
// Cleanup tears down the suite's resources: it captures Reloader pod logs for
// debugging, uninstalls the Helm release, and deletes the test namespace.
// It is a no-op when no namespace was created. Undeploy errors are
// deliberately ignored so namespace deletion still runs.
func (e *TestEnvironment) Cleanup() error {
	if e.Namespace == "" {
		return nil
	}
	GinkgoWriter.Printf("Cleaning up test namespace: %s\n", e.Namespace)
	GinkgoWriter.Printf("Cleaning up Helm release: %s\n", e.ReleaseName)
	// Collect Reloader logs before cleanup (useful for debugging); log
	// collection is best-effort and failures are silently skipped.
	logs, err := GetPodLogs(e.Ctx, e.KubeClient, e.Namespace, ReloaderPodSelector(e.ReleaseName))
	if err == nil && logs != "" {
		GinkgoWriter.Println("Reloader logs:")
		GinkgoWriter.Println(logs)
	}
	// Undeploy Reloader using the suite's release name
	_ = UndeployReloader(e.Namespace, e.ReleaseName)
	// Delete test namespace
	if err := DeleteNamespace(e.Ctx, e.KubeClient, e.Namespace); err != nil {
		return fmt.Errorf("deleting namespace: %w", err)
	}
	return nil
}
// DeployReloaderWithStrategy deploys Reloader with the specified reload
// strategy by setting the "reloader.reloadStrategy" Helm value.
func (e *TestEnvironment) DeployReloaderWithStrategy(strategy string) error {
	return e.DeployReloaderWithValues(map[string]string{
		"reloader.reloadStrategy": strategy,
	})
}
// DeployReloaderWithValues deploys Reloader into the suite's namespace with
// the specified Helm values, using the suite's unique release name and test
// image. Each test suite uses a unique release name to prevent cluster-scoped
// resource conflicts.
func (e *TestEnvironment) DeployReloaderWithValues(values map[string]string) error {
	GinkgoWriter.Printf("Deploying Reloader with values: %v\n", values)
	return DeployReloader(DeployOptions{
		Namespace:   e.Namespace,
		ReleaseName: e.ReleaseName,
		Image:       e.TestImage,
		Values:      values,
	})
}
// WaitForReloader blocks until the suite's Reloader deployment reports ready,
// or the DeploymentReady timeout elapses.
func (e *TestEnvironment) WaitForReloader() error {
	GinkgoWriter.Println("Waiting for Reloader to be ready...")
	return WaitForDeploymentReady(e.Ctx, e.KubeClient, e.Namespace, ReloaderDeploymentName(e.ReleaseName), DeploymentReady)
}
// DeployAndWait deploys Reloader with the given Helm values and blocks until
// the deployment reports ready, wrapping any failure with which phase failed.
func (e *TestEnvironment) DeployAndWait(values map[string]string) error {
	if err := e.DeployReloaderWithValues(values); err != nil {
		return fmt.Errorf("deploying Reloader: %w", err)
	}
	if err := e.WaitForReloader(); err != nil {
		return fmt.Errorf("waiting for Reloader: %w", err)
	}
	GinkgoWriter.Println("Reloader is ready")
	return nil
}

114
test/e2e/utils/utils.go Normal file
View File

@@ -0,0 +1,114 @@
// Package utils provides helper functions for e2e tests.
package utils
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
. "github.com/onsi/ginkgo/v2" //nolint:revive,staticcheck
)
// Run executes cmd from the project root and returns its combined
// stdout+stderr output (all stdout first, then all stderr). On failure the
// returned error carries the command line and the captured output. The
// process-wide working directory is also changed to the project root so the
// invoked tooling resolves relative paths consistently; a chdir failure is
// logged to the Ginkgo writer but not fatal.
func Run(cmd *exec.Cmd) (string, error) {
	projectDir, err := GetProjectDir()
	if err != nil {
		return "", fmt.Errorf("failed to get project dir: %w", err)
	}
	cmd.Dir = projectDir
	if chdirErr := os.Chdir(cmd.Dir); chdirErr != nil {
		_, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %q\n", chdirErr)
	}
	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	cmdLine := strings.Join(cmd.Args, " ")
	_, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", cmdLine)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	runErr := cmd.Run()
	output := stdout.String() + stderr.String()
	if runErr != nil {
		return output, fmt.Errorf("%q failed with error %q: %w", cmdLine, output, runErr)
	}
	return output, nil
}
// GetProjectDir returns the root directory of the project: the nearest
// ancestor of the working directory (including itself) that contains go.mod.
// If no go.mod is found, it falls back to cutting everything from the first
// "/test/e2e" path segment onward, so any suite subdirectory (annotations,
// envvars, flags, advanced, argo, openshift, ...) resolves to the repo root.
func GetProjectDir() (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("failed to get current working directory: %w", err)
	}
	// Walk up the directory tree looking for go.mod.
	dir := wd
	for {
		if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
			return dir, nil
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached the filesystem root without finding go.mod.
			break
		}
		dir = parent
	}
	// Fallback: strip the test-suite suffix in one cut. A chain of ReplaceAll
	// calls with longer patterns ("/test/e2e/annotations", ...) after a
	// "/test/e2e" replacement can never match — the first replacement already
	// removed their common prefix, leaving residue like "/annotations".
	if idx := strings.Index(wd, "/test/e2e"); idx != -1 {
		return wd[:idx], nil
	}
	return wd, nil
}
// GetNonEmptyLines splits output on newlines and returns the lines that are
// non-empty after trimming surrounding whitespace, in their original order.
// The trimmed form of each line is what gets returned.
func GetNonEmptyLines(output string) []string {
	var lines []string
	for _, raw := range strings.Split(output, "\n") {
		if line := strings.TrimSpace(raw); line != "" {
			lines = append(lines, line)
		}
	}
	return lines
}
// GetEnvOrDefault returns the value of the environment variable named by key.
// When the variable is unset or empty, defaultValue is returned instead.
func GetEnvOrDefault(key, defaultValue string) string {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	return value
}
// GetKubeconfig resolves the kubeconfig path: the KUBECONFIG environment
// variable wins when set; otherwise it defaults to ~/.kube/config. An empty
// string is returned only when the home directory cannot be determined.
func GetKubeconfig() string {
	if env := os.Getenv("KUBECONFIG"); env != "" {
		return env
	}
	if home, err := os.UserHomeDir(); err == nil {
		return filepath.Join(home, ".kube", "config")
	}
	return ""
}

498
test/e2e/utils/wait.go Normal file
View File

@@ -0,0 +1,498 @@
package utils
import (
"context"
"fmt"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)
// Timeout and interval constants for polling operations. These are shared by
// every WaitFor* helper in this package; DeploymentReady is deliberately
// generous to absorb slow CI image pulls and scheduling.
const (
	DefaultTimeout   = 30 * time.Second // General operations
	DefaultInterval  = 1 * time.Second  // Polling interval (faster feedback)
	ShortTimeout     = 5 * time.Second  // Quick checks
	NegativeTestWait = 3 * time.Second  // Wait before checking negative conditions
	DeploymentReady  = 60 * time.Second // Workload readiness (buffer for CI)
	ReloadTimeout    = 15 * time.Second // Time for reload to trigger
)
// WaitForDeploymentReady polls until the deployment's ready, updated and
// available replica counts all equal spec.replicas, or the timeout elapses.
// Transient Get errors are swallowed so polling continues; on timeout the
// underlying poll error is returned.
// NOTE(review): spec.replicas is dereferenced without a nil check — assumes
// the API server has defaulted it; confirm for objects fetched mid-creation.
func WaitForDeploymentReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // Keep polling
		}
		// Check if deployment is ready
		if deploy.Status.ReadyReplicas == *deploy.Spec.Replicas &&
			deploy.Status.UpdatedReplicas == *deploy.Spec.Replicas &&
			deploy.Status.AvailableReplicas == *deploy.Spec.Replicas {
			return true, nil
		}
		return false, nil
	})
}
// WaitForDeploymentReloaded waits for the deployment's pod template to gain
// the given reloader annotation key. It returns (true, nil) once the
// annotation appears, (false, nil) when the wait is cut short by timeout or
// cancellation, and (false, err) only for unexpected polling failures.
func WaitForDeploymentReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil annotations map is safe, so no explicit nil check.
		if _, ok := deploy.Spec.Template.Annotations[annotationKey]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForDaemonSetReloaded waits for the DaemonSet's pod template to gain the
// given reloader annotation key. It returns (true, nil) once the annotation
// appears, (false, nil) on timeout/cancellation, and (false, err) only for
// unexpected polling failures.
func WaitForDaemonSetReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil annotations map is safe, so no explicit nil check.
		if _, ok := ds.Spec.Template.Annotations[annotationKey]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForStatefulSetReloaded waits for the StatefulSet's pod template to gain
// the given reloader annotation key. It returns (true, nil) once the
// annotation appears, (false, nil) on timeout/cancellation, and (false, err)
// only for unexpected polling failures.
func WaitForStatefulSetReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil annotations map is safe, so no explicit nil check.
		if _, ok := ss.Spec.Template.Annotations[annotationKey]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForCronJobReloaded waits for the CronJob's job pod template to gain the
// given reloader annotation key. It returns (true, nil) once the annotation
// appears, (false, nil) on timeout/cancellation, and (false, err) only for
// unexpected polling failures.
func WaitForCronJobReloaded(ctx context.Context, client kubernetes.Interface, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		cj, err := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil annotations map is safe, so no explicit nil check.
		if _, ok := cj.Spec.JobTemplate.Spec.Template.Annotations[annotationKey]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForJobCreated waits until at least one Job matching labelSelector
// exists in the namespace. It returns (true, nil) once seen, (false, nil) on
// timeout/cancellation, and (false, err) only for unexpected polling
// failures.
func WaitForJobCreated(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{
			LabelSelector: labelSelector,
		})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		if len(jobs.Items) > 0 {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForCronJobTriggeredJob waits for a Job that is owned by the named
// CronJob and carries the "cronjob.kubernetes.io/instantiate" annotation,
// which marks Jobs created by a manual trigger (Reloader's reload mechanism
// for CronJobs). It returns (true, nil) once such a Job is seen, (false, nil)
// on timeout/cancellation, and (false, err) only for unexpected polling
// failures.
func WaitForCronJobTriggeredJob(ctx context.Context, client kubernetes.Interface, namespace, cronJobName string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		jobs, err := client.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		for _, job := range jobs.Items {
			for _, ownerRef := range job.OwnerReferences {
				// Only Jobs owned by this specific CronJob count.
				if ownerRef.Kind != "CronJob" || ownerRef.Name != cronJobName {
					continue
				}
				// Manual instantiation (as performed by Reloader) is marked by
				// this annotation; scheduled runs do not carry it.
				if _, ok := job.Annotations["cronjob.kubernetes.io/instantiate"]; ok {
					found = true
					return true, nil
				}
			}
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForDeploymentEnvVar waits for any container in the deployment's pod
// template to carry an environment variable whose name starts with prefix
// (e.g. "STAKATER_"). It returns (true, nil) once seen, (false, nil) on
// timeout/cancellation, and (false, err) only for unexpected polling
// failures.
func WaitForDeploymentEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		if hasEnvVarWithPrefix(deploy.Spec.Template.Spec.Containers, prefix) {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForDaemonSetEnvVar waits for any container in the DaemonSet's pod
// template to carry an environment variable whose name starts with prefix.
// It returns (true, nil) once seen, (false, nil) on timeout/cancellation,
// and (false, err) only for unexpected polling failures.
func WaitForDaemonSetEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		if hasEnvVarWithPrefix(ds.Spec.Template.Spec.Containers, prefix) {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForStatefulSetEnvVar waits for any container in the StatefulSet's pod
// template to carry an environment variable whose name starts with prefix.
// It returns (true, nil) once seen, (false, nil) on timeout/cancellation,
// and (false, err) only for unexpected polling failures.
func WaitForStatefulSetEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		if hasEnvVarWithPrefix(ss.Spec.Template.Spec.Containers, prefix) {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForDeploymentPaused waits for the deployment object (not its pod
// template) to carry the paused-at annotation. It returns (true, nil) once
// seen, (false, nil) on timeout/cancellation, and (false, err) only for
// unexpected polling failures.
func WaitForDeploymentPaused(ctx context.Context, client kubernetes.Interface, namespace, name, pausedAtAnnotation string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Deployment-level annotations; indexing a nil map is safe.
		if _, ok := deploy.Annotations[pausedAtAnnotation]; ok {
			found = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return found, nil
}
// WaitForDeploymentUnpaused waits for the deployment to NOT carry the
// paused-at annotation. It returns (true, nil) once the annotation is absent,
// (false, nil) on timeout/cancellation, and (false, err) only for unexpected
// polling failures.
func WaitForDeploymentUnpaused(ctx context.Context, client kubernetes.Interface, namespace, name, pausedAtAnnotation string, timeout time.Duration) (bool, error) {
	var unpaused bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		deploy, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		// Indexing a nil map yields ok=false, which also covers the
		// annotations-map-is-nil case.
		if _, ok := deploy.Annotations[pausedAtAnnotation]; !ok {
			unpaused = true
			return true, nil
		}
		return false, nil
	})
	// wait.Interrupted recognizes timeout/cancellation even when wrapped; a
	// direct != comparison with context.DeadlineExceeded is brittle.
	if err != nil && !wait.Interrupted(err) {
		return false, err
	}
	return unpaused, nil
}
// WaitForDaemonSetReady waits for a DaemonSet to have all pods ready.
// Readiness requires at least one scheduled pod and NumberReady to match
// DesiredNumberScheduled.
func WaitForDaemonSetReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ds, getErr := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if getErr != nil {
			// Transient lookup failure: keep polling.
			return false, nil
		}
		desired := ds.Status.DesiredNumberScheduled
		return desired > 0 && ds.Status.NumberReady == desired, nil
	})
}
// WaitForStatefulSetReady waits for a StatefulSet to have all replicas ready.
// It polls until Status.ReadyReplicas matches the desired replica count, and
// returns the poll error (context.DeadlineExceeded) on timeout.
func WaitForStatefulSetReady(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		ss, err := client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Transient lookup failure: keep polling.
			return false, nil
		}
		// spec.replicas is a *int32 and may be unset; the API default is 1.
		// Guard against a nil dereference instead of panicking mid-poll.
		want := int32(1)
		if ss.Spec.Replicas != nil {
			want = *ss.Spec.Replicas
		}
		return ss.Status.ReadyReplicas == want, nil
	})
}
// GetDeployment retrieves a deployment by name.
// Thin convenience wrapper over the apps/v1 client; the API error is returned unchanged.
func GetDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.Deployment, error) {
	return client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
}
// GetDaemonSet retrieves a DaemonSet by name.
// Thin convenience wrapper over the apps/v1 client; the API error is returned unchanged.
func GetDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.DaemonSet, error) {
	return client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
}
// GetStatefulSet retrieves a StatefulSet by name.
// Thin convenience wrapper over the apps/v1 client; the API error is returned unchanged.
func GetStatefulSet(ctx context.Context, client kubernetes.Interface, namespace, name string) (*appsv1.StatefulSet, error) {
	return client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
}
// GetCronJob retrieves a CronJob by name.
// Thin convenience wrapper over the batch/v1 client; the API error is returned unchanged.
func GetCronJob(ctx context.Context, client kubernetes.Interface, namespace, name string) (*batchv1.CronJob, error) {
	return client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
}
// WaitForCronJobExists waits for a CronJob to exist in the cluster.
// This is useful for giving Reloader time to detect and index the CronJob before making changes.
func WaitForCronJobExists(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		if _, getErr := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}); getErr != nil {
			return false, nil // not visible yet (or transient error); keep polling
		}
		return true, nil
	})
}
// GetJob retrieves a Job by name.
// Thin convenience wrapper over the batch/v1 client; the API error is returned unchanged.
func GetJob(ctx context.Context, client kubernetes.Interface, namespace, name string) (*batchv1.Job, error) {
	return client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
}
// hasEnvVarWithPrefix reports whether any of the given containers defines an
// environment variable whose name starts with prefix.
func hasEnvVarWithPrefix(containers []corev1.Container, prefix string) bool {
	for i := range containers {
		for _, ev := range containers[i].Env {
			if strings.HasPrefix(ev.Name, prefix) {
				return true
			}
		}
	}
	return false
}
// WaitForJobRecreated waits for a Job to be deleted and recreated with a new UID.
// Returns the new Job's UID if recreation was detected.
func WaitForJobRecreated(ctx context.Context, client kubernetes.Interface, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) {
	newUID, recreated := "", false
	pollErr := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		job, getErr := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if getErr != nil {
			// The job may be mid-deletion; keep polling for its replacement.
			return false, nil
		}
		uid := string(job.UID)
		if uid == originalUID {
			return false, nil
		}
		// A different UID means the object was deleted and recreated.
		newUID, recreated = uid, true
		return true, nil
	})
	if pollErr != nil && pollErr != context.DeadlineExceeded {
		return "", false, pollErr
	}
	return newUID, recreated, nil
}
// WaitForJobNotFound waits for a Job to be deleted.
// Returns (true, nil) once a Get for the Job fails, (false, nil) on timeout.
//
// NOTE(review): any Get error — not only a NotFound — is treated as "deleted",
// so a transient API failure would report deletion prematurely. Consider
// narrowing this with apierrors.IsNotFound; confirm against the callers' needs.
func WaitForJobNotFound(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) (bool, error) {
	var deleted bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		_, err := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Get failed: assume the Job is gone (see NOTE above).
			deleted = true
			return true, nil
		}
		return false, nil
	})
	if err != nil && err != context.DeadlineExceeded {
		return false, err
	}
	return deleted, nil
}
// WaitForJobExists waits for a Job to exist in the cluster.
func WaitForJobExists(ctx context.Context, client kubernetes.Interface, namespace, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		if _, getErr := client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}); getErr != nil {
			return false, nil // not visible yet (or transient error); keep polling
		}
		return true, nil
	})
}
// GetPodLogs retrieves logs from pods matching the given label selector.
// Per-container log failures are recorded inline in the output rather than
// aborting the whole collection; only the initial pod List can return an error.
func GetPodLogs(ctx context.Context, client kubernetes.Interface, namespace, labelSelector string) (string, error) {
	podList, listErr := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	if listErr != nil {
		return "", fmt.Errorf("failed to list pods: %w", listErr)
	}
	var out strings.Builder
	for i := range podList.Items {
		pod := &podList.Items[i]
		for _, c := range pod.Spec.Containers {
			raw, logErr := client.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
				Container: c.Name,
			}).Do(ctx).Raw()
			if logErr != nil {
				fmt.Fprintf(&out, "Error getting logs for %s/%s: %v\n", pod.Name, c.Name, logErr)
				continue
			}
			fmt.Fprintf(&out, "=== %s/%s ===\n%s\n", pod.Name, c.Name, raw)
		}
	}
	return out.String(), nil
}

View File

@@ -0,0 +1,160 @@
package utils
import (
"context"
"time"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
// WorkloadType represents the type of Kubernetes workload.
// The string value matches the workload kind the adapter manages.
type WorkloadType string

const (
	WorkloadDeployment       WorkloadType = "Deployment"
	WorkloadDaemonSet        WorkloadType = "DaemonSet"
	WorkloadStatefulSet      WorkloadType = "StatefulSet"
	WorkloadCronJob          WorkloadType = "CronJob"
	WorkloadJob              WorkloadType = "Job"
	WorkloadArgoRollout      WorkloadType = "ArgoRollout"      // Argo Rollouts CRD; needs cluster support
	WorkloadDeploymentConfig WorkloadType = "DeploymentConfig" // OpenShift; needs cluster support
)
// ReloadStrategy represents the reload strategy used by Reloader.
type ReloadStrategy string

const (
	// StrategyAnnotations triggers reloads via a pod-template annotation.
	StrategyAnnotations ReloadStrategy = "annotations"
	// StrategyEnvVars triggers reloads by injecting a STAKATER_-prefixed env var.
	StrategyEnvVars ReloadStrategy = "envvars"
)
// WorkloadConfig holds configuration for workload creation.
// A zero value yields a minimal workload with no ConfigMap/Secret wiring; each
// Use* flag switches on one reference style, and several may be combined.
type WorkloadConfig struct {
	// Resource references
	ConfigMapName string
	SecretName    string
	// Annotations to set on the workload
	Annotations map[string]string
	// Reference methods (flags - multiple can be true)
	UseConfigMapEnvFrom    bool
	UseSecretEnvFrom       bool
	UseConfigMapVolume     bool
	UseSecretVolume        bool
	UseProjectedVolume     bool
	UseConfigMapKeyRef     bool
	UseSecretKeyRef        bool
	UseInitContainer       bool
	UseInitContainerVolume bool
	// For valueFrom references; builders fall back to defaults when empty
	// (key "key", env var "CONFIG_VAR"/"SECRET_VAR").
	ConfigMapKey string
	SecretKey    string
	EnvVarName   string
	// Special options
	MultipleContainers int // Number of containers (0 or 1 means single container)
}
// WorkloadAdapter provides a unified interface for all workload types.
// This allows tests to be parameterized across different workload types:
// one adapter exists per WorkloadType and is obtained via AdapterRegistry.
type WorkloadAdapter interface {
	// Type returns the workload type.
	Type() WorkloadType
	// Create creates the workload with the given config.
	Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error
	// Delete removes the workload.
	Delete(ctx context.Context, namespace, name string) error
	// WaitReady waits for the workload to be ready.
	WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error
	// WaitReloaded waits for the workload to have the reload annotation.
	// Returns true if the annotation was found, false if timeout occurred.
	WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error)
	// WaitEnvVar waits for the workload to have a STAKATER_ env var (for envvars strategy).
	// Returns true if the env var was found, false if timeout occurred.
	WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error)
	// SupportsEnvVarStrategy returns true if the workload supports env var reload strategy.
	// CronJob does not support this as it uses job creation instead.
	SupportsEnvVarStrategy() bool
	// RequiresSpecialHandling returns true for workloads that need special handling.
	// For example, CronJob triggers a new job instead of rolling restart.
	RequiresSpecialHandling() bool
}
// AdapterRegistry holds adapters for all workload types.
// Adapters are keyed by their WorkloadType; NewAdapterRegistry installs the
// standard set and RegisterAdapter adds cluster-specific ones.
type AdapterRegistry struct {
	kubeClient    kubernetes.Interface
	dynamicClient dynamic.Interface
	adapters      map[WorkloadType]WorkloadAdapter
}
// NewAdapterRegistry creates a new adapter registry with all standard adapters.
func NewAdapterRegistry(kubeClient kubernetes.Interface, dynamicClient dynamic.Interface) *AdapterRegistry {
	reg := &AdapterRegistry{
		kubeClient:    kubeClient,
		dynamicClient: dynamicClient,
		adapters:      map[WorkloadType]WorkloadAdapter{},
	}
	// Built-in adapters that work on any cluster.
	for _, adapter := range []WorkloadAdapter{
		NewDeploymentAdapter(kubeClient),
		NewDaemonSetAdapter(kubeClient),
		NewStatefulSetAdapter(kubeClient),
		NewCronJobAdapter(kubeClient),
		NewJobAdapter(kubeClient),
	} {
		reg.adapters[adapter.Type()] = adapter
	}
	// Argo and OpenShift adapters are registered separately via RegisterAdapter
	// as they require specific cluster support.
	return reg
}
// RegisterAdapter registers a custom adapter for a workload type.
// Use this to add Argo Rollout or DeploymentConfig adapters.
// Registering a second adapter for the same type replaces the first.
func (r *AdapterRegistry) RegisterAdapter(adapter WorkloadAdapter) {
	r.adapters[adapter.Type()] = adapter
}
// Get returns the adapter for the given workload type.
// Returns nil if the adapter is not registered (the map lookup yields the
// interface zero value for unknown types).
func (r *AdapterRegistry) Get(wt WorkloadType) WorkloadAdapter {
	return r.adapters[wt]
}
// GetStandardWorkloads returns the standard workload types that are always available.
// Only the rolling-restart workloads are listed; CronJob and Job are registered
// in NewAdapterRegistry but not included here.
func (r *AdapterRegistry) GetStandardWorkloads() []WorkloadType {
	return []WorkloadType{
		WorkloadDeployment,
		WorkloadDaemonSet,
		WorkloadStatefulSet,
	}
}
// GetAllWorkloads returns all registered workload types.
// Note: map iteration order is random, so the slice order is unspecified.
func (r *AdapterRegistry) GetAllWorkloads() []WorkloadType {
	types := make([]WorkloadType, 0, len(r.adapters))
	for t := range r.adapters {
		types = append(types, t)
	}
	return types
}
// GetEnvVarWorkloads returns workload types that support env var reload strategy.
func (r *AdapterRegistry) GetEnvVarWorkloads() []WorkloadType {
	supported := make([]WorkloadType, 0, len(r.adapters))
	for t, adapter := range r.adapters {
		if !adapter.SupportsEnvVarStrategy() {
			continue
		}
		supported = append(supported, t)
	}
	return supported
}

View File

@@ -0,0 +1,340 @@
package utils
import (
"context"
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
)
// ArgoRolloutAdapter implements WorkloadAdapter for Argo Rollouts.
// Rollouts are a CRD, so every operation goes through the dynamic client
// with unstructured objects rather than a typed clientset.
type ArgoRolloutAdapter struct {
	dynamicClient dynamic.Interface
}

// NewArgoRolloutAdapter creates a new ArgoRolloutAdapter.
func NewArgoRolloutAdapter(dynamicClient dynamic.Interface) *ArgoRolloutAdapter {
	return &ArgoRolloutAdapter{dynamicClient: dynamicClient}
}

// Type returns the workload type.
func (a *ArgoRolloutAdapter) Type() WorkloadType {
	return WorkloadArgoRollout
}

// Create creates an Argo Rollout with the given config.
// The WorkloadConfig is translated into RolloutOption mutators first.
func (a *ArgoRolloutAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	opts := buildRolloutOptions(cfg)
	return CreateArgoRollout(ctx, a.dynamicClient, namespace, name, opts...)
}

// Delete removes the Argo Rollout.
func (a *ArgoRolloutAdapter) Delete(ctx context.Context, namespace, name string) error {
	return DeleteArgoRollout(ctx, a.dynamicClient, namespace, name)
}

// WaitReady waits for the Argo Rollout to be ready.
func (a *ArgoRolloutAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	return WaitForRolloutReady(ctx, a.dynamicClient, namespace, name, timeout)
}

// WaitReloaded waits for the Argo Rollout to have the reload annotation.
func (a *ArgoRolloutAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	return WaitForRolloutReloaded(ctx, a.dynamicClient, namespace, name, annotationKey, timeout)
}

// WaitEnvVar waits for the Argo Rollout to have a STAKATER_ env var.
func (a *ArgoRolloutAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	return WaitForRolloutEnvVar(ctx, a.dynamicClient, namespace, name, prefix, timeout)
}

// SupportsEnvVarStrategy returns true as Argo Rollouts support env var reload strategy.
func (a *ArgoRolloutAdapter) SupportsEnvVarStrategy() bool {
	return true
}

// RequiresSpecialHandling returns false as Argo Rollouts use standard rolling restart.
func (a *ArgoRolloutAdapter) RequiresSpecialHandling() bool {
	return false
}
// buildRolloutOptions converts WorkloadConfig to RolloutOption slice.
// Options are appended in a fixed order so the resulting Rollout spec is
// deterministic for a given config.
func buildRolloutOptions(cfg WorkloadConfig) []RolloutOption {
	var opts []RolloutOption
	add := func(opt RolloutOption) { opts = append(opts, opt) }
	// Pod-template annotations.
	if len(cfg.Annotations) > 0 {
		add(WithRolloutAnnotations(cfg.Annotations))
	}
	// envFrom references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapEnvFrom {
		add(WithRolloutConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretEnvFrom {
		add(WithRolloutSecretEnvFrom(cfg.SecretName))
	}
	// Volume-based references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapVolume {
		add(WithRolloutConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretVolume {
		add(WithRolloutSecretVolume(cfg.SecretName))
	}
	if cfg.UseProjectedVolume {
		add(WithRolloutProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// valueFrom references, with defaults for unset key/env-var names.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapKeyRef {
		key, envName := cfg.ConfigMapKey, cfg.EnvVarName
		if key == "" {
			key = "key"
		}
		if envName == "" {
			envName = "CONFIG_VAR"
		}
		add(WithRolloutConfigMapKeyRef(cfg.ConfigMapName, key, envName))
	}
	if cfg.SecretName != "" && cfg.UseSecretKeyRef {
		key, envName := cfg.SecretKey, cfg.EnvVarName
		if key == "" {
			key = "key"
		}
		if envName == "" {
			envName = "SECRET_VAR"
		}
		add(WithRolloutSecretKeyRef(cfg.SecretName, key, envName))
	}
	// Init containers (envFrom-based and volume-based variants).
	if cfg.UseInitContainer {
		add(WithRolloutInitContainer(cfg.ConfigMapName, cfg.SecretName))
	}
	if cfg.UseInitContainerVolume {
		add(WithRolloutInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return opts
}
// WithRolloutProjectedVolume adds a projected volume with ConfigMap and/or Secret
// sources to a Rollout, and mounts it at /etc/projected in the first container.
// Empty cmName/secretName are skipped; with both empty the volume is still added
// with an empty sources list.
func WithRolloutProjectedVolume(cmName, secretName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		volumeName := "projected-config"
		sources := []interface{}{}
		if cmName != "" {
			sources = append(sources, map[string]interface{}{
				"configMap": map[string]interface{}{
					"name": cmName,
				},
			})
		}
		if secretName != "" {
			sources = append(sources, map[string]interface{}{
				"secret": map[string]interface{}{
					"name": secretName,
				},
			})
		}
		// Add volume (NestedSlice errors are ignored; a missing path yields a nil slice).
		volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes")
		volumes = append(volumes, map[string]interface{}{
			"name": volumeName,
			"projected": map[string]interface{}{
				"sources": sources,
			},
		})
		_ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes")
		// Add volumeMount to the first container only; no-op when there are no containers.
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		if len(containers) > 0 {
			container := containers[0].(map[string]interface{})
			volumeMounts, _, _ := unstructured.NestedSlice(container, "volumeMounts")
			volumeMounts = append(volumeMounts, map[string]interface{}{
				"name":      volumeName,
				"mountPath": "/etc/projected",
			})
			container["volumeMounts"] = volumeMounts
			containers[0] = container
			_ = unstructured.SetNestedSlice(rollout.Object, containers, "spec", "template", "spec", "containers")
		}
	}
}
// WithRolloutConfigMapKeyRef adds an env var with valueFrom.configMapKeyRef to a Rollout.
// The variable is appended to the first container's env list; a Rollout without
// containers is left unchanged.
func WithRolloutConfigMapKeyRef(cmName, key, envVarName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		path := []string{"spec", "template", "spec", "containers"}
		containers, _, _ := unstructured.NestedSlice(rollout.Object, path...)
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		env, _, _ := unstructured.NestedSlice(first, "env")
		entry := map[string]interface{}{
			"name": envVarName,
			"valueFrom": map[string]interface{}{
				"configMapKeyRef": map[string]interface{}{
					"name": cmName,
					"key":  key,
				},
			},
		}
		first["env"] = append(env, entry)
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, path...)
	}
}
// WithRolloutSecretKeyRef adds an env var with valueFrom.secretKeyRef to a Rollout.
// The variable is appended to the first container's env list; a Rollout without
// containers is left unchanged.
func WithRolloutSecretKeyRef(secretName, key, envVarName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		path := []string{"spec", "template", "spec", "containers"}
		containers, _, _ := unstructured.NestedSlice(rollout.Object, path...)
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		env, _, _ := unstructured.NestedSlice(first, "env")
		entry := map[string]interface{}{
			"name": envVarName,
			"valueFrom": map[string]interface{}{
				"secretKeyRef": map[string]interface{}{
					"name": secretName,
					"key":  key,
				},
			},
		}
		first["env"] = append(env, entry)
		containers[0] = first
		_ = unstructured.SetNestedSlice(rollout.Object, containers, path...)
	}
}
// WithRolloutInitContainer adds an init container that references ConfigMap and/or
// Secret via envFrom. Empty names are skipped; with both empty the init container
// is still appended, just without an envFrom section.
func WithRolloutInitContainer(cmName, secretName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		var envFrom []interface{}
		if cmName != "" {
			envFrom = append(envFrom, map[string]interface{}{
				"configMapRef": map[string]interface{}{"name": cmName},
			})
		}
		if secretName != "" {
			envFrom = append(envFrom, map[string]interface{}{
				"secretRef": map[string]interface{}{"name": secretName},
			})
		}
		init := map[string]interface{}{
			"name":    "init",
			"image":   DefaultImage,
			"command": []interface{}{"sh", "-c", "echo init done"},
		}
		if len(envFrom) > 0 {
			init["envFrom"] = envFrom
		}
		path := []string{"spec", "template", "spec", "initContainers"}
		existing, _, _ := unstructured.NestedSlice(rollout.Object, path...)
		_ = unstructured.SetNestedSlice(rollout.Object, append(existing, init), path...)
	}
}
// WithRolloutInitContainerVolume adds an init container with ConfigMap/Secret
// volume mounts. For each non-empty name a dedicated volume is created on the
// pod spec and mounted into the init container under /etc/init-config or
// /etc/init-secrets respectively.
func WithRolloutInitContainerVolume(cmName, secretName string) RolloutOption {
	return func(rollout *unstructured.Unstructured) {
		initContainer := map[string]interface{}{
			"name":    "init",
			"image":   DefaultImage,
			"command": []interface{}{"sh", "-c", "echo init done"},
		}
		volumeMounts := []interface{}{}
		// Existing volumes are preserved; new ones are appended.
		volumes, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "volumes")
		if cmName != "" {
			volumeName := fmt.Sprintf("init-cm-%s", cmName)
			volumes = append(volumes, map[string]interface{}{
				"name": volumeName,
				"configMap": map[string]interface{}{
					"name": cmName,
				},
			})
			volumeMounts = append(volumeMounts, map[string]interface{}{
				"name":      volumeName,
				"mountPath": fmt.Sprintf("/etc/init-config/%s", cmName),
			})
		}
		if secretName != "" {
			volumeName := fmt.Sprintf("init-secret-%s", secretName)
			volumes = append(volumes, map[string]interface{}{
				"name": volumeName,
				"secret": map[string]interface{}{
					"secretName": secretName,
				},
			})
			volumeMounts = append(volumeMounts, map[string]interface{}{
				"name":      volumeName,
				"mountPath": fmt.Sprintf("/etc/init-secrets/%s", secretName),
			})
		}
		if len(volumeMounts) > 0 {
			initContainer["volumeMounts"] = volumeMounts
		}
		// Volumes are written back even when nothing was added, mirroring the read above.
		_ = unstructured.SetNestedSlice(rollout.Object, volumes, "spec", "template", "spec", "volumes")
		initContainers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "initContainers")
		initContainers = append(initContainers, initContainer)
		_ = unstructured.SetNestedSlice(rollout.Object, initContainers, "spec", "template", "spec", "initContainers")
	}
}
// WaitForRolloutEnvVar waits for an Argo Rollout's container to have an env var
// with the given prefix. Transient Get errors are swallowed and polling
// continues; on timeout it returns (false, nil) so callers can distinguish
// "not found in time" from hard polling failures.
func WaitForRolloutEnvVar(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	var found bool
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		rollout, err := dynamicClient.Resource(ArgoRolloutGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		containers, _, _ := unstructured.NestedSlice(rollout.Object, "spec", "template", "spec", "containers")
		for _, c := range containers {
			// Use comma-ok assertions: a malformed entry from the API server
			// should be skipped, not panic the whole poll loop.
			container, ok := c.(map[string]interface{})
			if !ok {
				continue
			}
			env, _, _ := unstructured.NestedSlice(container, "env")
			for _, e := range env {
				envVar, ok := e.(map[string]interface{})
				if !ok {
					continue
				}
				// envName deliberately does not shadow the rollout `name` parameter.
				if envName, ok := envVar["name"].(string); ok && strings.HasPrefix(envName, prefix) {
					found = true
					return true, nil
				}
			}
		}
		return false, nil
	})
	if err != nil && err != context.DeadlineExceeded {
		return false, err
	}
	return found, nil
}

View File

@@ -0,0 +1,223 @@
package utils
import (
"context"
"time"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CronJobAdapter implements WorkloadAdapter for Kubernetes CronJobs.
// CronJobs differ from rolling-restart workloads: Reloader's response to a
// config change is observed either as a pod-template annotation or as a newly
// triggered Job (see RequiresSpecialHandling / WaitForTriggeredJob).
type CronJobAdapter struct {
	client kubernetes.Interface
}

// NewCronJobAdapter creates a new CronJobAdapter.
func NewCronJobAdapter(client kubernetes.Interface) *CronJobAdapter {
	return &CronJobAdapter{client: client}
}

// Type returns the workload type.
func (a *CronJobAdapter) Type() WorkloadType {
	return WorkloadCronJob
}

// Create creates a CronJob with the given config.
// The WorkloadConfig is translated into CronJobOption mutators first.
func (a *CronJobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	opts := buildCronJobOptions(cfg)
	_, err := CreateCronJob(ctx, a.client, namespace, name, opts...)
	return err
}

// Delete removes the CronJob.
func (a *CronJobAdapter) Delete(ctx context.Context, namespace, name string) error {
	return DeleteCronJob(ctx, a.client, namespace, name)
}

// WaitReady waits for the CronJob to exist (CronJobs are "ready" immediately after creation).
func (a *CronJobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	return WaitForCronJobExists(ctx, a.client, namespace, name, timeout)
}

// WaitReloaded waits for the CronJob to have the reload annotation OR for a triggered Job.
// For CronJobs, Reloader can either:
// 1. Add an annotation to the pod template
// 2. Trigger a new Job (which is the special handling case)
func (a *CronJobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	return WaitForCronJobReloaded(ctx, a.client, namespace, name, annotationKey, timeout)
}

// WaitEnvVar is not supported for CronJobs as they don't use env var reload strategy.
// It always returns (false, nil) without polling.
func (a *CronJobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	// CronJobs don't support env var strategy
	return false, nil
}

// SupportsEnvVarStrategy returns false as CronJobs don't support env var reload strategy.
func (a *CronJobAdapter) SupportsEnvVarStrategy() bool {
	return false
}

// RequiresSpecialHandling returns true as CronJobs use job triggering instead of rolling restart.
func (a *CronJobAdapter) RequiresSpecialHandling() bool {
	return true
}

// WaitForTriggeredJob waits for Reloader to trigger a new Job from this CronJob.
// This method is CronJob-specific and not part of the WorkloadAdapter interface.
func (a *CronJobAdapter) WaitForTriggeredJob(ctx context.Context, namespace, cronJobName string, timeout time.Duration) (bool, error) {
	return WaitForCronJobTriggeredJob(ctx, a.client, namespace, cronJobName, timeout)
}
// buildCronJobOptions converts WorkloadConfig to CronJobOption slice.
// Options are appended in a fixed order; keyRef/init-container flags are not
// handled here because CronJobs only use annotation, envFrom and volume wiring.
func buildCronJobOptions(cfg WorkloadConfig) []CronJobOption {
	var opts []CronJobOption
	// Pod-template annotations.
	if len(cfg.Annotations) > 0 {
		opts = append(opts, WithCronJobAnnotations(cfg.Annotations))
	}
	// envFrom references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapEnvFrom {
		opts = append(opts, WithCronJobConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretEnvFrom {
		opts = append(opts, WithCronJobSecretEnvFrom(cfg.SecretName))
	}
	// Volume-based references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapVolume {
		opts = append(opts, WithCronJobConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretVolume {
		opts = append(opts, WithCronJobSecretVolume(cfg.SecretName))
	}
	if cfg.UseProjectedVolume {
		opts = append(opts, WithCronJobProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return opts
}
// WithCronJobConfigMapVolume adds a volume mount for a ConfigMap to a CronJob.
// The volume is mounted into the first container at /etc/config/<name>.
func WithCronJobConfigMapVolume(name string) CronJobOption {
	return func(cj *batchv1.CronJob) {
		volumeName := "cm-" + name
		podSpec := &cj.Spec.JobTemplate.Spec.Template.Spec
		podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
			Name: volumeName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: name},
				},
			},
		})
		first := &podSpec.Containers[0]
		first.VolumeMounts = append(first.VolumeMounts, corev1.VolumeMount{
			Name:      volumeName,
			MountPath: "/etc/config/" + name,
		})
	}
}
// WithCronJobSecretVolume adds a volume mount for a Secret to a CronJob.
// The volume is mounted into the first container at /etc/secrets/<name>.
func WithCronJobSecretVolume(name string) CronJobOption {
	return func(cj *batchv1.CronJob) {
		volumeName := "secret-" + name
		podSpec := &cj.Spec.JobTemplate.Spec.Template.Spec
		podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
			Name: volumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: name,
				},
			},
		})
		first := &podSpec.Containers[0]
		first.VolumeMounts = append(first.VolumeMounts, corev1.VolumeMount{
			Name:      volumeName,
			MountPath: "/etc/secrets/" + name,
		})
	}
}
// WithCronJobProjectedVolume adds a projected volume with ConfigMap and/or Secret
// sources to a CronJob, mounted into the first container at /etc/projected.
// Empty cmName/secretName are skipped as sources.
func WithCronJobProjectedVolume(cmName, secretName string) CronJobOption {
	return func(cj *batchv1.CronJob) {
		const volumeName = "projected-config"
		sources := []corev1.VolumeProjection{}
		if cmName != "" {
			sources = append(sources, corev1.VolumeProjection{
				ConfigMap: &corev1.ConfigMapProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
				},
			})
		}
		if secretName != "" {
			sources = append(sources, corev1.VolumeProjection{
				Secret: &corev1.SecretProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
				},
			})
		}
		podSpec := &cj.Spec.JobTemplate.Spec.Template.Spec
		podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
			Name: volumeName,
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: sources,
				},
			},
		})
		first := &podSpec.Containers[0]
		first.VolumeMounts = append(first.VolumeMounts, corev1.VolumeMount{
			Name:      volumeName,
			MountPath: "/etc/projected",
		})
	}
}
// WaitForCronJobEnvVar waits for a CronJob's containers to have an environment
// variable with the given prefix. Note: CronJobs don't typically use this strategy.
func WaitForCronJobEnvVar(ctx context.Context, client kubernetes.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	found := false
	pollErr := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		cj, getErr := client.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if getErr != nil {
			// Transient lookup failure: keep polling.
			return false, nil
		}
		if !hasEnvVarWithPrefix(cj.Spec.JobTemplate.Spec.Template.Spec.Containers, prefix) {
			return false, nil
		}
		found = true
		return true, nil
	})
	if pollErr != nil && pollErr != context.DeadlineExceeded {
		return false, pollErr
	}
	return found, nil
}

View File

@@ -0,0 +1,246 @@
package utils
import (
"context"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// DaemonSetAdapter implements WorkloadAdapter for Kubernetes DaemonSets.
// All operations delegate to the shared helper functions using the typed clientset.
type DaemonSetAdapter struct {
	client kubernetes.Interface
}

// NewDaemonSetAdapter creates a new DaemonSetAdapter.
func NewDaemonSetAdapter(client kubernetes.Interface) *DaemonSetAdapter {
	return &DaemonSetAdapter{client: client}
}

// Type returns the workload type.
func (a *DaemonSetAdapter) Type() WorkloadType {
	return WorkloadDaemonSet
}

// Create creates a DaemonSet with the given config.
// The WorkloadConfig is translated into DaemonSetOption mutators first.
func (a *DaemonSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	opts := buildDaemonSetOptions(cfg)
	_, err := CreateDaemonSet(ctx, a.client, namespace, name, opts...)
	return err
}

// Delete removes the DaemonSet.
func (a *DaemonSetAdapter) Delete(ctx context.Context, namespace, name string) error {
	return DeleteDaemonSet(ctx, a.client, namespace, name)
}

// WaitReady waits for the DaemonSet to be ready.
func (a *DaemonSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	return WaitForDaemonSetReady(ctx, a.client, namespace, name, timeout)
}

// WaitReloaded waits for the DaemonSet to have the reload annotation.
func (a *DaemonSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	return WaitForDaemonSetReloaded(ctx, a.client, namespace, name, annotationKey, timeout)
}

// WaitEnvVar waits for the DaemonSet to have a STAKATER_ env var.
func (a *DaemonSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	return WaitForDaemonSetEnvVar(ctx, a.client, namespace, name, prefix, timeout)
}

// SupportsEnvVarStrategy returns true as DaemonSets support env var reload strategy.
func (a *DaemonSetAdapter) SupportsEnvVarStrategy() bool {
	return true
}

// RequiresSpecialHandling returns false as DaemonSets use standard rolling restart.
func (a *DaemonSetAdapter) RequiresSpecialHandling() bool {
	return false
}
// buildDaemonSetOptions converts WorkloadConfig to DaemonSetOption slice.
// Options are appended in a fixed order so the resulting DaemonSet spec is
// deterministic for a given config.
func buildDaemonSetOptions(cfg WorkloadConfig) []DaemonSetOption {
	var opts []DaemonSetOption
	add := func(opt DaemonSetOption) { opts = append(opts, opt) }
	// Pod-template annotations.
	if len(cfg.Annotations) > 0 {
		add(WithDaemonSetAnnotations(cfg.Annotations))
	}
	// envFrom references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapEnvFrom {
		add(WithDaemonSetConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretEnvFrom {
		add(WithDaemonSetSecretEnvFrom(cfg.SecretName))
	}
	// Volume-based references.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapVolume {
		add(WithDaemonSetConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.SecretName != "" && cfg.UseSecretVolume {
		add(WithDaemonSetSecretVolume(cfg.SecretName))
	}
	if cfg.UseProjectedVolume {
		add(WithDaemonSetProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// valueFrom references, with defaults for unset key/env-var names.
	if cfg.ConfigMapName != "" && cfg.UseConfigMapKeyRef {
		key, envName := cfg.ConfigMapKey, cfg.EnvVarName
		if key == "" {
			key = "key"
		}
		if envName == "" {
			envName = "CONFIG_VAR"
		}
		add(WithDaemonSetConfigMapKeyRef(cfg.ConfigMapName, key, envName))
	}
	if cfg.SecretName != "" && cfg.UseSecretKeyRef {
		key, envName := cfg.SecretKey, cfg.EnvVarName
		if key == "" {
			key = "key"
		}
		if envName == "" {
			envName = "SECRET_VAR"
		}
		add(WithDaemonSetSecretKeyRef(cfg.SecretName, key, envName))
	}
	// Init containers (envFrom-based and volume-based variants).
	if cfg.UseInitContainer {
		add(WithDaemonSetInitContainer(cfg.ConfigMapName, cfg.SecretName))
	}
	if cfg.UseInitContainerVolume {
		add(WithDaemonSetInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return opts
}
// WithDaemonSetConfigMapVolume mounts the named ConfigMap into the first
// container of the DaemonSet at /etc/config/<name>.
func WithDaemonSetConfigMapVolume(name string) DaemonSetOption {
	return func(ds *appsv1.DaemonSet) {
		podSpec := &ds.Spec.Template.Spec
		volName := fmt.Sprintf("cm-%s", name)
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: name},
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: fmt.Sprintf("/etc/config/%s", name),
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithDaemonSetSecretVolume mounts the named Secret into the first container
// of the DaemonSet at /etc/secrets/<name>.
func WithDaemonSetSecretVolume(name string) DaemonSetOption {
	return func(ds *appsv1.DaemonSet) {
		podSpec := &ds.Spec.Template.Spec
		volName := fmt.Sprintf("secret-%s", name)
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: name,
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: fmt.Sprintf("/etc/secrets/%s", name),
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithDaemonSetInitContainer appends an init container that consumes the given
// ConfigMap and/or Secret via envFrom; an empty name skips that source.
func WithDaemonSetInitContainer(cmName, secretName string) DaemonSetOption {
	return func(ds *appsv1.DaemonSet) {
		ic := corev1.Container{
			Name:    "init",
			Image:   DefaultImage,
			Command: []string{"sh", "-c", "echo init done"},
		}
		if cmName != "" {
			src := corev1.EnvFromSource{
				ConfigMapRef: &corev1.ConfigMapEnvSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
				},
			}
			ic.EnvFrom = append(ic.EnvFrom, src)
		}
		if secretName != "" {
			src := corev1.EnvFromSource{
				SecretRef: &corev1.SecretEnvSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
				},
			}
			ic.EnvFrom = append(ic.EnvFrom, src)
		}
		ds.Spec.Template.Spec.InitContainers = append(ds.Spec.Template.Spec.InitContainers, ic)
	}
}
// WithDaemonSetInitContainerVolume appends an init container that mounts the
// given ConfigMap and/or Secret as volumes; an empty name skips that source.
func WithDaemonSetInitContainerVolume(cmName, secretName string) DaemonSetOption {
	return func(ds *appsv1.DaemonSet) {
		podSpec := &ds.Spec.Template.Spec
		ic := corev1.Container{
			Name:    "init",
			Image:   DefaultImage,
			Command: []string{"sh", "-c", "echo init done"},
		}
		if cmName != "" {
			volName := fmt.Sprintf("init-cm-%s", cmName)
			podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
				Name: volName,
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
					},
				},
			})
			ic.VolumeMounts = append(ic.VolumeMounts, corev1.VolumeMount{
				Name:      volName,
				MountPath: fmt.Sprintf("/etc/init-config/%s", cmName),
			})
		}
		if secretName != "" {
			volName := fmt.Sprintf("init-secret-%s", secretName)
			podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
				Name: volName,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: secretName,
					},
				},
			})
			ic.VolumeMounts = append(ic.VolumeMounts, corev1.VolumeMount{
				Name:      volName,
				MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName),
			})
		}
		podSpec.InitContainers = append(podSpec.InitContainers, ic)
	}
}

View File

@@ -0,0 +1,132 @@
package utils
import (
"context"
"time"
"k8s.io/client-go/kubernetes"
)
// DeploymentAdapter implements WorkloadAdapter for Kubernetes Deployments.
type DeploymentAdapter struct {
	// client is the typed Kubernetes clientset used for all Deployment operations.
	client kubernetes.Interface
}
// NewDeploymentAdapter creates a new DeploymentAdapter backed by the given clientset.
func NewDeploymentAdapter(client kubernetes.Interface) *DeploymentAdapter {
	adapter := &DeploymentAdapter{client: client}
	return adapter
}
// Type returns the workload type.
// Always WorkloadDeployment for this adapter.
func (a *DeploymentAdapter) Type() WorkloadType {
	return WorkloadDeployment
}
// Create creates a Deployment shaped by the given config; only the error is surfaced.
func (a *DeploymentAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	_, err := CreateDeployment(ctx, a.client, namespace, name, buildDeploymentOptions(cfg)...)
	return err
}
// Delete removes the named Deployment from the namespace.
func (a *DeploymentAdapter) Delete(ctx context.Context, namespace, name string) error {
	err := DeleteDeployment(ctx, a.client, namespace, name)
	return err
}
// WaitReady blocks until the Deployment reports ready or the timeout elapses.
func (a *DeploymentAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	err := WaitForDeploymentReady(ctx, a.client, namespace, name, timeout)
	return err
}
// WaitReloaded blocks until the Deployment carries the given reload annotation or the timeout elapses.
func (a *DeploymentAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	reloaded, err := WaitForDeploymentReloaded(ctx, a.client, namespace, name, annotationKey, timeout)
	return reloaded, err
}
// WaitEnvVar blocks until a container env var with the given prefix (e.g. STAKATER_) appears on the Deployment.
func (a *DeploymentAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	found, err := WaitForDeploymentEnvVar(ctx, a.client, namespace, name, prefix, timeout)
	return found, err
}
// SupportsEnvVarStrategy returns true as Deployments support env var reload strategy.
// Reloader can inject an env var into the pod template instead of bumping an annotation.
func (a *DeploymentAdapter) SupportsEnvVarStrategy() bool {
	return true
}
// RequiresSpecialHandling returns false as Deployments use standard rolling restart.
// Tests can use the generic annotation/env-var checks (contrast with Jobs, which are recreated).
func (a *DeploymentAdapter) RequiresSpecialHandling() bool {
	return false
}
// buildDeploymentOptions translates a WorkloadConfig into the matching list of
// DeploymentOption values. Options are appended in a fixed order so the
// generated pod spec is deterministic.
func buildDeploymentOptions(cfg WorkloadConfig) []DeploymentOption {
	// orDefault substitutes fallback when the configured value is empty.
	orDefault := func(value, fallback string) string {
		if value == "" {
			return fallback
		}
		return value
	}
	var depOpts []DeploymentOption
	if len(cfg.Annotations) > 0 {
		depOpts = append(depOpts, WithAnnotations(cfg.Annotations))
	}
	// envFrom references
	if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" {
		depOpts = append(depOpts, WithConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.UseSecretEnvFrom && cfg.SecretName != "" {
		depOpts = append(depOpts, WithSecretEnvFrom(cfg.SecretName))
	}
	// volume mounts
	if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" {
		depOpts = append(depOpts, WithConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.UseSecretVolume && cfg.SecretName != "" {
		depOpts = append(depOpts, WithSecretVolume(cfg.SecretName))
	}
	// projected volume
	if cfg.UseProjectedVolume {
		depOpts = append(depOpts, WithProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// valueFrom references (with defaulted key / env var names)
	if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" {
		depOpts = append(depOpts, WithConfigMapKeyRef(
			cfg.ConfigMapName,
			orDefault(cfg.ConfigMapKey, "key"),
			orDefault(cfg.EnvVarName, "CONFIG_VAR"),
		))
	}
	if cfg.UseSecretKeyRef && cfg.SecretName != "" {
		depOpts = append(depOpts, WithSecretKeyRef(
			cfg.SecretName,
			orDefault(cfg.SecretKey, "key"),
			orDefault(cfg.EnvVarName, "SECRET_VAR"),
		))
	}
	// init containers (envFrom and volume-mount variants)
	if cfg.UseInitContainer {
		depOpts = append(depOpts, WithInitContainer(cfg.ConfigMapName, cfg.SecretName))
	}
	if cfg.UseInitContainerVolume {
		depOpts = append(depOpts, WithInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// extra containers beyond the default single container
	if cfg.MultipleContainers > 1 {
		depOpts = append(depOpts, WithMultipleContainers(cfg.MultipleContainers))
	}
	return depOpts
}

View File

@@ -0,0 +1,207 @@
package utils
import (
"context"
"time"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// JobAdapter implements WorkloadAdapter for Kubernetes Jobs.
// Note: Jobs are handled specially by Reloader - they are recreated rather than updated.
type JobAdapter struct {
	// client is the typed Kubernetes clientset used for all Job operations.
	client kubernetes.Interface
}
// NewJobAdapter creates a new JobAdapter backed by the given clientset.
func NewJobAdapter(client kubernetes.Interface) *JobAdapter {
	adapter := &JobAdapter{client: client}
	return adapter
}
// Type returns the workload type.
// Always WorkloadJob for this adapter.
func (a *JobAdapter) Type() WorkloadType {
	return WorkloadJob
}
// Create creates a Job shaped by the given config; only the error is surfaced.
func (a *JobAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	_, err := CreateJob(ctx, a.client, namespace, name, buildJobOptions(cfg)...)
	return err
}
// Delete removes the named Job from the namespace.
func (a *JobAdapter) Delete(ctx context.Context, namespace, name string) error {
	err := DeleteJob(ctx, a.client, namespace, name)
	return err
}
// WaitReady blocks until the Job exists (Jobs have no rolling "ready" state to wait for).
func (a *JobAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	err := WaitForJobExists(ctx, a.client, namespace, name, timeout)
	return err
}
// WaitReloaded waits for the Job to be recreated (new UID).
// For Jobs, Reloader recreates the Job rather than updating annotations,
// so annotation-based detection does not apply and this method is a no-op
// that always reports (false, nil). Callers must instead capture the UID via
// GetOriginalUID before triggering a reload and then use WaitForRecreation
// to verify that the Job was replaced.
func (a *JobAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	// For Jobs, we check if it was recreated by looking for a new UID
	// This requires storing the original UID before the test
	// For simplicity, we use the same pattern as other workloads
	// The test should verify recreation using WaitForJobRecreated instead
	return false, nil
}
// WaitEnvVar is not supported for Jobs as they don't use env var reload strategy.
// It always reports (false, nil) without polling.
func (a *JobAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	return false, nil
}
// SupportsEnvVarStrategy returns false as Jobs don't support env var reload strategy.
// Reloader recreates Jobs instead of patching their pod template.
func (a *JobAdapter) SupportsEnvVarStrategy() bool {
	return false
}
// RequiresSpecialHandling returns true as Jobs are recreated by Reloader.
// Tests should verify recreation via GetOriginalUID + WaitForRecreation
// rather than the annotation/env-var checks used for other workloads.
func (a *JobAdapter) RequiresSpecialHandling() bool {
	return true
}
// GetOriginalUID fetches the Job and returns its current UID as a string,
// for later comparison when verifying recreation.
func (a *JobAdapter) GetOriginalUID(ctx context.Context, namespace, name string) (string, error) {
	job, err := GetJob(ctx, a.client, namespace, name)
	if err != nil {
		return "", err
	}
	uid := string(job.UID)
	return uid, nil
}
// WaitForRecreation blocks until the Job is observed with a UID different from
// originalUID, returning the new UID and whether recreation happened.
func (a *JobAdapter) WaitForRecreation(ctx context.Context, namespace, name, originalUID string, timeout time.Duration) (string, bool, error) {
	newUID, recreated, err := WaitForJobRecreated(ctx, a.client, namespace, name, originalUID, timeout)
	return newUID, recreated, err
}
// buildJobOptions translates a WorkloadConfig into the matching list of
// JobOption values. Jobs support a smaller option surface than Deployments:
// annotations, envFrom, plain volumes and projected volumes only.
func buildJobOptions(cfg WorkloadConfig) []JobOption {
	var jobOpts []JobOption
	if len(cfg.Annotations) > 0 {
		jobOpts = append(jobOpts, WithJobAnnotations(cfg.Annotations))
	}
	// envFrom references
	if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" {
		jobOpts = append(jobOpts, WithJobConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.UseSecretEnvFrom && cfg.SecretName != "" {
		jobOpts = append(jobOpts, WithJobSecretEnvFrom(cfg.SecretName))
	}
	// volume mounts
	if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" {
		jobOpts = append(jobOpts, WithJobConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.UseSecretVolume && cfg.SecretName != "" {
		jobOpts = append(jobOpts, WithJobSecretVolume(cfg.SecretName))
	}
	// projected volume
	if cfg.UseProjectedVolume {
		jobOpts = append(jobOpts, WithJobProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return jobOpts
}
// WithJobConfigMapVolume mounts the named ConfigMap into the first container
// of the Job at /etc/config/<name>.
func WithJobConfigMapVolume(name string) JobOption {
	return func(j *batchv1.Job) {
		podSpec := &j.Spec.Template.Spec
		volName := "cm-" + name
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: name},
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: "/etc/config/" + name,
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithJobSecretVolume mounts the named Secret into the first container
// of the Job at /etc/secrets/<name>.
func WithJobSecretVolume(name string) JobOption {
	return func(j *batchv1.Job) {
		podSpec := &j.Spec.Template.Spec
		volName := "secret-" + name
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: name,
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: "/etc/secrets/" + name,
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithJobProjectedVolume adds a single projected volume named
// "projected-config" combining the given ConfigMap and/or Secret, mounted at
// /etc/projected in the first container. Empty names are skipped.
func WithJobProjectedVolume(cmName, secretName string) JobOption {
	return func(j *batchv1.Job) {
		const volName = "projected-config"
		podSpec := &j.Spec.Template.Spec
		projections := []corev1.VolumeProjection{}
		if cmName != "" {
			projections = append(projections, corev1.VolumeProjection{
				ConfigMap: &corev1.ConfigMapProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
				},
			})
		}
		if secretName != "" {
			projections = append(projections, corev1.VolumeProjection{
				Secret: &corev1.SecretProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
				},
			})
		}
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: projections,
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: "/etc/projected",
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}

View File

@@ -0,0 +1,340 @@
package utils
import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
)
// DeploymentConfigAdapter implements WorkloadAdapter for OpenShift DeploymentConfigs.
type DeploymentConfigAdapter struct {
	// dynamicClient is used because DeploymentConfig is an OpenShift CRD-style
	// resource not covered by the typed Kubernetes clientset.
	dynamicClient dynamic.Interface
}
// NewDeploymentConfigAdapter creates a new DeploymentConfigAdapter backed by the given dynamic client.
func NewDeploymentConfigAdapter(dynamicClient dynamic.Interface) *DeploymentConfigAdapter {
	adapter := &DeploymentConfigAdapter{dynamicClient: dynamicClient}
	return adapter
}
// Type returns the workload type.
// Always WorkloadDeploymentConfig for this adapter.
func (a *DeploymentConfigAdapter) Type() WorkloadType {
	return WorkloadDeploymentConfig
}
// Create creates a DeploymentConfig shaped by the given config.
func (a *DeploymentConfigAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	return CreateDeploymentConfig(ctx, a.dynamicClient, namespace, name, buildDCOptions(cfg)...)
}
// Delete removes the named DeploymentConfig from the namespace.
func (a *DeploymentConfigAdapter) Delete(ctx context.Context, namespace, name string) error {
	err := DeleteDeploymentConfig(ctx, a.dynamicClient, namespace, name)
	return err
}
// WaitReady blocks until the DeploymentConfig reports ready or the timeout elapses.
func (a *DeploymentConfigAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	err := WaitForDeploymentConfigReady(ctx, a.dynamicClient, namespace, name, timeout)
	return err
}
// WaitReloaded blocks until the DeploymentConfig carries the given reload annotation or the timeout elapses.
func (a *DeploymentConfigAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	reloaded, err := WaitForDeploymentConfigReloaded(ctx, a.dynamicClient, namespace, name, annotationKey, timeout)
	return reloaded, err
}
// WaitEnvVar blocks until a container env var with the given prefix (e.g. STAKATER_) appears on the DeploymentConfig.
func (a *DeploymentConfigAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	found, err := WaitForDeploymentConfigEnvVar(ctx, a.dynamicClient, namespace, name, prefix, timeout)
	return found, err
}
// SupportsEnvVarStrategy returns true as DeploymentConfigs support env var reload strategy.
// Reloader can inject an env var into the pod template instead of bumping an annotation.
func (a *DeploymentConfigAdapter) SupportsEnvVarStrategy() bool {
	return true
}
// RequiresSpecialHandling returns false as DeploymentConfigs use standard rolling restart.
// Tests can use the generic annotation/env-var checks (contrast with Jobs).
func (a *DeploymentConfigAdapter) RequiresSpecialHandling() bool {
	return false
}
// buildDCOptions translates a WorkloadConfig into the matching list of
// DCOption values. Options are appended in a fixed order so the generated
// pod template is deterministic.
func buildDCOptions(cfg WorkloadConfig) []DCOption {
	// orDefault substitutes fallback when the configured value is empty.
	orDefault := func(value, fallback string) string {
		if value == "" {
			return fallback
		}
		return value
	}
	var dcOpts []DCOption
	// annotations go onto the pod template
	if len(cfg.Annotations) > 0 {
		dcOpts = append(dcOpts, WithDCAnnotations(cfg.Annotations))
	}
	// envFrom references
	if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" {
		dcOpts = append(dcOpts, WithDCConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.UseSecretEnvFrom && cfg.SecretName != "" {
		dcOpts = append(dcOpts, WithDCSecretEnvFrom(cfg.SecretName))
	}
	// volume mounts
	if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" {
		dcOpts = append(dcOpts, WithDCConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.UseSecretVolume && cfg.SecretName != "" {
		dcOpts = append(dcOpts, WithDCSecretVolume(cfg.SecretName))
	}
	// projected volume
	if cfg.UseProjectedVolume {
		dcOpts = append(dcOpts, WithDCProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// valueFrom references (with defaulted key / env var names)
	if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" {
		dcOpts = append(dcOpts, WithDCConfigMapKeyRef(
			cfg.ConfigMapName,
			orDefault(cfg.ConfigMapKey, "key"),
			orDefault(cfg.EnvVarName, "CONFIG_VAR"),
		))
	}
	if cfg.UseSecretKeyRef && cfg.SecretName != "" {
		dcOpts = append(dcOpts, WithDCSecretKeyRef(
			cfg.SecretName,
			orDefault(cfg.SecretKey, "key"),
			orDefault(cfg.EnvVarName, "SECRET_VAR"),
		))
	}
	// init containers (envFrom and volume-mount variants)
	if cfg.UseInitContainer {
		dcOpts = append(dcOpts, WithDCInitContainer(cfg.ConfigMapName, cfg.SecretName))
	}
	if cfg.UseInitContainerVolume {
		dcOpts = append(dcOpts, WithDCInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return dcOpts
}
// WithDCProjectedVolume adds a projected volume named "projected-config"
// combining the given ConfigMap and/or Secret, mounted at /etc/projected in
// the first container of the DeploymentConfig. Empty names are skipped.
func WithDCProjectedVolume(cmName, secretName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		const volName = "projected-config"
		srcs := []interface{}{}
		if cmName != "" {
			srcs = append(srcs, map[string]interface{}{
				"configMap": map[string]interface{}{
					"name": cmName,
				},
			})
		}
		if secretName != "" {
			srcs = append(srcs, map[string]interface{}{
				"secret": map[string]interface{}{
					"name": secretName,
				},
			})
		}
		// Append the projected volume to the pod template.
		vols, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes")
		vols = append(vols, map[string]interface{}{
			"name": volName,
			"projected": map[string]interface{}{
				"sources": srcs,
			},
		})
		_ = unstructured.SetNestedSlice(dc.Object, vols, "spec", "template", "spec", "volumes")
		// Mount it into the first container, if one exists.
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		mounts, _, _ := unstructured.NestedSlice(first, "volumeMounts")
		mounts = append(mounts, map[string]interface{}{
			"name":      volName,
			"mountPath": "/etc/projected",
		})
		first["volumeMounts"] = mounts
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCConfigMapKeyRef adds an env var with valueFrom.configMapKeyRef to the
// first container of the DeploymentConfig; it is a no-op when no container exists.
func WithDCConfigMapKeyRef(cmName, key, envVarName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		env, _, _ := unstructured.NestedSlice(first, "env")
		entry := map[string]interface{}{
			"name": envVarName,
			"valueFrom": map[string]interface{}{
				"configMapKeyRef": map[string]interface{}{
					"name": cmName,
					"key":  key,
				},
			},
		}
		first["env"] = append(env, entry)
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCSecretKeyRef adds an env var with valueFrom.secretKeyRef to the first
// container of the DeploymentConfig; it is a no-op when no container exists.
func WithDCSecretKeyRef(secretName, key, envVarName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		if len(containers) == 0 {
			return
		}
		first := containers[0].(map[string]interface{})
		env, _, _ := unstructured.NestedSlice(first, "env")
		entry := map[string]interface{}{
			"name": envVarName,
			"valueFrom": map[string]interface{}{
				"secretKeyRef": map[string]interface{}{
					"name": secretName,
					"key":  key,
				},
			},
		}
		first["env"] = append(env, entry)
		containers[0] = first
		_ = unstructured.SetNestedSlice(dc.Object, containers, "spec", "template", "spec", "containers")
	}
}
// WithDCInitContainer appends an init container that consumes the given
// ConfigMap and/or Secret via envFrom; an empty name skips that source.
func WithDCInitContainer(cmName, secretName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		ic := map[string]interface{}{
			"name":    "init",
			"image":   DefaultImage,
			"command": []interface{}{"sh", "-c", "echo init done"},
		}
		refs := []interface{}{}
		if cmName != "" {
			refs = append(refs, map[string]interface{}{
				"configMapRef": map[string]interface{}{
					"name": cmName,
				},
			})
		}
		if secretName != "" {
			refs = append(refs, map[string]interface{}{
				"secretRef": map[string]interface{}{
					"name": secretName,
				},
			})
		}
		// Only set envFrom when at least one source was provided.
		if len(refs) > 0 {
			ic["envFrom"] = refs
		}
		initContainers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "initContainers")
		initContainers = append(initContainers, ic)
		_ = unstructured.SetNestedSlice(dc.Object, initContainers, "spec", "template", "spec", "initContainers")
	}
}
// WithDCInitContainerVolume appends an init container that mounts the given
// ConfigMap and/or Secret as volumes; an empty name skips that source.
func WithDCInitContainerVolume(cmName, secretName string) DCOption {
	return func(dc *unstructured.Unstructured) {
		ic := map[string]interface{}{
			"name":    "init",
			"image":   DefaultImage,
			"command": []interface{}{"sh", "-c", "echo init done"},
		}
		mounts := []interface{}{}
		vols, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "volumes")
		if cmName != "" {
			volName := fmt.Sprintf("init-cm-%s", cmName)
			vols = append(vols, map[string]interface{}{
				"name": volName,
				"configMap": map[string]interface{}{
					"name": cmName,
				},
			})
			mounts = append(mounts, map[string]interface{}{
				"name":      volName,
				"mountPath": fmt.Sprintf("/etc/init-config/%s", cmName),
			})
		}
		if secretName != "" {
			volName := fmt.Sprintf("init-secret-%s", secretName)
			vols = append(vols, map[string]interface{}{
				"name": volName,
				"secret": map[string]interface{}{
					"secretName": secretName,
				},
			})
			mounts = append(mounts, map[string]interface{}{
				"name":      volName,
				"mountPath": fmt.Sprintf("/etc/init-secrets/%s", secretName),
			})
		}
		// Only set volumeMounts when at least one source was provided.
		if len(mounts) > 0 {
			ic["volumeMounts"] = mounts
		}
		_ = unstructured.SetNestedSlice(dc.Object, vols, "spec", "template", "spec", "volumes")
		initContainers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "initContainers")
		initContainers = append(initContainers, ic)
		_ = unstructured.SetNestedSlice(dc.Object, initContainers, "spec", "template", "spec", "initContainers")
	}
}
// WaitForDeploymentConfigEnvVar waits for any container in the
// DeploymentConfig's pod template to expose an env var whose name starts with
// prefix. It returns (true, nil) once such a variable is observed,
// (false, nil) when the timeout elapses without one appearing, and
// (false, err) only for unexpected polling failures.
func WaitForDeploymentConfigEnvVar(ctx context.Context, dynamicClient dynamic.Interface, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	err := wait.PollUntilContextTimeout(ctx, DefaultInterval, timeout, true, func(ctx context.Context) (bool, error) {
		dc, err := dynamicClient.Resource(DeploymentConfigGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Transient errors (e.g. the DC not yet created) are retried until the timeout.
			return false, nil
		}
		containers, _, _ := unstructured.NestedSlice(dc.Object, "spec", "template", "spec", "containers")
		for _, c := range containers {
			container, ok := c.(map[string]interface{})
			if !ok {
				// Skip malformed entries instead of panicking on a blind type assertion.
				continue
			}
			env, _, _ := unstructured.NestedSlice(container, "env")
			for _, e := range env {
				envVar, ok := e.(map[string]interface{})
				if !ok {
					continue
				}
				if envName, ok := envVar["name"].(string); ok && strings.HasPrefix(envName, prefix) {
					return true, nil
				}
			}
		}
		return false, nil
	})
	if err == nil {
		return true, nil
	}
	// A deadline just means the env var never appeared; report that without an
	// error. errors.Is also matches wrapped deadline errors, which the previous
	// plain == comparison would have missed.
	if errors.Is(err, context.DeadlineExceeded) {
		return false, nil
	}
	return false, err
}

View File

@@ -0,0 +1,246 @@
package utils
import (
"context"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
)
// StatefulSetAdapter implements WorkloadAdapter for Kubernetes StatefulSets.
type StatefulSetAdapter struct {
	// client is the typed Kubernetes clientset used for all StatefulSet operations.
	client kubernetes.Interface
}
// NewStatefulSetAdapter creates a new StatefulSetAdapter backed by the given clientset.
func NewStatefulSetAdapter(client kubernetes.Interface) *StatefulSetAdapter {
	adapter := &StatefulSetAdapter{client: client}
	return adapter
}
// Type returns the workload type.
// Always WorkloadStatefulSet for this adapter.
func (a *StatefulSetAdapter) Type() WorkloadType {
	return WorkloadStatefulSet
}
// Create creates a StatefulSet shaped by the given config; only the error is surfaced.
func (a *StatefulSetAdapter) Create(ctx context.Context, namespace, name string, cfg WorkloadConfig) error {
	_, err := CreateStatefulSet(ctx, a.client, namespace, name, buildStatefulSetOptions(cfg)...)
	return err
}
// Delete removes the named StatefulSet from the namespace.
func (a *StatefulSetAdapter) Delete(ctx context.Context, namespace, name string) error {
	err := DeleteStatefulSet(ctx, a.client, namespace, name)
	return err
}
// WaitReady blocks until the StatefulSet reports ready or the timeout elapses.
func (a *StatefulSetAdapter) WaitReady(ctx context.Context, namespace, name string, timeout time.Duration) error {
	err := WaitForStatefulSetReady(ctx, a.client, namespace, name, timeout)
	return err
}
// WaitReloaded blocks until the StatefulSet carries the given reload annotation or the timeout elapses.
func (a *StatefulSetAdapter) WaitReloaded(ctx context.Context, namespace, name, annotationKey string, timeout time.Duration) (bool, error) {
	reloaded, err := WaitForStatefulSetReloaded(ctx, a.client, namespace, name, annotationKey, timeout)
	return reloaded, err
}
// WaitEnvVar blocks until a container env var with the given prefix (e.g. STAKATER_) appears on the StatefulSet.
func (a *StatefulSetAdapter) WaitEnvVar(ctx context.Context, namespace, name, prefix string, timeout time.Duration) (bool, error) {
	found, err := WaitForStatefulSetEnvVar(ctx, a.client, namespace, name, prefix, timeout)
	return found, err
}
// SupportsEnvVarStrategy returns true as StatefulSets support env var reload strategy.
// Reloader can inject an env var into the pod template instead of bumping an annotation.
func (a *StatefulSetAdapter) SupportsEnvVarStrategy() bool {
	return true
}
// RequiresSpecialHandling returns false as StatefulSets use standard rolling restart.
// Tests can use the generic annotation/env-var checks (contrast with Jobs).
func (a *StatefulSetAdapter) RequiresSpecialHandling() bool {
	return false
}
// buildStatefulSetOptions translates a WorkloadConfig into the matching list
// of StatefulSetOption values. Options are appended in a fixed order so the
// generated pod spec is deterministic.
func buildStatefulSetOptions(cfg WorkloadConfig) []StatefulSetOption {
	// orDefault substitutes fallback when the configured value is empty.
	orDefault := func(value, fallback string) string {
		if value == "" {
			return fallback
		}
		return value
	}
	var ssOpts []StatefulSetOption
	if len(cfg.Annotations) > 0 {
		ssOpts = append(ssOpts, WithStatefulSetAnnotations(cfg.Annotations))
	}
	// envFrom references
	if cfg.UseConfigMapEnvFrom && cfg.ConfigMapName != "" {
		ssOpts = append(ssOpts, WithStatefulSetConfigMapEnvFrom(cfg.ConfigMapName))
	}
	if cfg.UseSecretEnvFrom && cfg.SecretName != "" {
		ssOpts = append(ssOpts, WithStatefulSetSecretEnvFrom(cfg.SecretName))
	}
	// volume mounts
	if cfg.UseConfigMapVolume && cfg.ConfigMapName != "" {
		ssOpts = append(ssOpts, WithStatefulSetConfigMapVolume(cfg.ConfigMapName))
	}
	if cfg.UseSecretVolume && cfg.SecretName != "" {
		ssOpts = append(ssOpts, WithStatefulSetSecretVolume(cfg.SecretName))
	}
	// projected volume
	if cfg.UseProjectedVolume {
		ssOpts = append(ssOpts, WithStatefulSetProjectedVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	// valueFrom references (with defaulted key / env var names)
	if cfg.UseConfigMapKeyRef && cfg.ConfigMapName != "" {
		ssOpts = append(ssOpts, WithStatefulSetConfigMapKeyRef(
			cfg.ConfigMapName,
			orDefault(cfg.ConfigMapKey, "key"),
			orDefault(cfg.EnvVarName, "CONFIG_VAR"),
		))
	}
	if cfg.UseSecretKeyRef && cfg.SecretName != "" {
		ssOpts = append(ssOpts, WithStatefulSetSecretKeyRef(
			cfg.SecretName,
			orDefault(cfg.SecretKey, "key"),
			orDefault(cfg.EnvVarName, "SECRET_VAR"),
		))
	}
	// init containers (envFrom and volume-mount variants)
	if cfg.UseInitContainer {
		ssOpts = append(ssOpts, WithStatefulSetInitContainer(cfg.ConfigMapName, cfg.SecretName))
	}
	if cfg.UseInitContainerVolume {
		ssOpts = append(ssOpts, WithStatefulSetInitContainerVolume(cfg.ConfigMapName, cfg.SecretName))
	}
	return ssOpts
}
// WithStatefulSetConfigMapVolume mounts the named ConfigMap into the first
// container of the StatefulSet at /etc/config/<name>.
func WithStatefulSetConfigMapVolume(name string) StatefulSetOption {
	return func(ss *appsv1.StatefulSet) {
		podSpec := &ss.Spec.Template.Spec
		volName := fmt.Sprintf("cm-%s", name)
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: name},
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: fmt.Sprintf("/etc/config/%s", name),
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithStatefulSetSecretVolume mounts the named Secret into the first container
// of the StatefulSet at /etc/secrets/<name>.
func WithStatefulSetSecretVolume(name string) StatefulSetOption {
	return func(ss *appsv1.StatefulSet) {
		podSpec := &ss.Spec.Template.Spec
		volName := fmt.Sprintf("secret-%s", name)
		vol := corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: name,
				},
			},
		}
		mount := corev1.VolumeMount{
			Name:      volName,
			MountPath: fmt.Sprintf("/etc/secrets/%s", name),
		}
		podSpec.Volumes = append(podSpec.Volumes, vol)
		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, mount)
	}
}
// WithStatefulSetInitContainer appends an init container that consumes the
// given ConfigMap and/or Secret via envFrom; an empty name skips that source.
func WithStatefulSetInitContainer(cmName, secretName string) StatefulSetOption {
	return func(ss *appsv1.StatefulSet) {
		ic := corev1.Container{
			Name:    "init",
			Image:   DefaultImage,
			Command: []string{"sh", "-c", "echo init done"},
		}
		if cmName != "" {
			src := corev1.EnvFromSource{
				ConfigMapRef: &corev1.ConfigMapEnvSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
				},
			}
			ic.EnvFrom = append(ic.EnvFrom, src)
		}
		if secretName != "" {
			src := corev1.EnvFromSource{
				SecretRef: &corev1.SecretEnvSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
				},
			}
			ic.EnvFrom = append(ic.EnvFrom, src)
		}
		ss.Spec.Template.Spec.InitContainers = append(ss.Spec.Template.Spec.InitContainers, ic)
	}
}
// WithStatefulSetInitContainerVolume appends an init container that mounts the
// given ConfigMap and/or Secret as volumes; an empty name skips that source.
func WithStatefulSetInitContainerVolume(cmName, secretName string) StatefulSetOption {
	return func(ss *appsv1.StatefulSet) {
		podSpec := &ss.Spec.Template.Spec
		ic := corev1.Container{
			Name:    "init",
			Image:   DefaultImage,
			Command: []string{"sh", "-c", "echo init done"},
		}
		if cmName != "" {
			volName := fmt.Sprintf("init-cm-%s", cmName)
			podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
				Name: volName,
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
					},
				},
			})
			ic.VolumeMounts = append(ic.VolumeMounts, corev1.VolumeMount{
				Name:      volName,
				MountPath: fmt.Sprintf("/etc/init-config/%s", cmName),
			})
		}
		if secretName != "" {
			volName := fmt.Sprintf("init-secret-%s", secretName)
			podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
				Name: volName,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: secretName,
					},
				},
			})
			ic.VolumeMounts = append(ic.VolumeMounts, corev1.VolumeMount{
				Name:      volName,
				MountPath: fmt.Sprintf("/etc/init-secrets/%s", secretName),
			})
		}
		podSpec.InitContainers = append(podSpec.InitContainers, ic)
	}
}